Compare commits — 29b2dbd69b...d78d82f8fa (10 commits)

| SHA1 |
|---|
| d78d82f8fa |
| b62d6db866 |
| b35c79761f |
| 9806a970da |
| 7f445a24e0 |
| 5ecd691e41 |
| 67827bd051 |
| eca0a825dc |
| 6d64044296 |
| 02a5593411 |
20  .sqlx/query-0b4b51420a53deb3c37cea4c85c6dc5203cf7200dee8ed8cc6326ef243f716b0.json  generated  Normal file
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO tags (id, name, workspace_id, creator_id, updated_at, deleted_at, permissions)\n SELECT * FROM UNNEST($1::bigint[], $2::text[], $3::bigint[], $4::bigint[], $5::timestamptz[], $6::timestamptz[], $7::text[])\n ON CONFLICT (id) DO UPDATE SET\n name = excluded.name,\n workspace_id = excluded.workspace_id,\n creator_id = excluded.creator_id,\n updated_at = excluded.updated_at,\n deleted_at = excluded.deleted_at,\n permissions = excluded.permissions\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Left": [
        "Int8Array",
        "TextArray",
        "Int8Array",
        "Int8Array",
        "TimestamptzArray",
        "TimestamptzArray",
        "TextArray"
      ]
    },
    "nullable": []
  },
  "hash": "0b4b51420a53deb3c37cea4c85c6dc5203cf7200dee8ed8cc6326ef243f716b0"
}
23  .sqlx/query-107be36abd0886a0a6179c1c589ab1ceb3a43851c0b5a4e0efda0fcef7072b6e.json  generated  Normal file
@@ -0,0 +1,23 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO tracking_clients\n (id, updated_at, archived, creator_id, integration_provider, notes, name, server_deleted_at, workspace_id, permissions)\n SELECT * FROM UNNEST($1::bigint[], $2::timestamptz[], $3::bool[], $4::bigint[], $5::text[], $6::text[], $7::text[], $8::timestamptz[], $9::bigint[], $10::text[])\n ON CONFLICT (id) DO UPDATE SET\n updated_at = excluded.updated_at,\n archived = excluded.archived,\n creator_id = excluded.creator_id,\n integration_provider = excluded.integration_provider,\n notes = excluded.notes,\n name = excluded.name,\n server_deleted_at = excluded.server_deleted_at,\n workspace_id = excluded.workspace_id,\n permissions = excluded.permissions\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Left": [
        "Int8Array",
        "TimestamptzArray",
        "BoolArray",
        "Int8Array",
        "TextArray",
        "TextArray",
        "TextArray",
        "TimestamptzArray",
        "Int8Array",
        "TextArray"
      ]
    },
    "nullable": []
  },
  "hash": "107be36abd0886a0a6179c1c589ab1ceb3a43851c0b5a4e0efda0fcef7072b6e"
}
20  .sqlx/query-1681e2e9011c799d91f79b45ab712963093766c6c8b86a0a46fa6e1b3e1dca7b.json  generated  Normal file
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "select max(updated_at) as last_updated_at from time_entries",
  "describe": {
    "columns": [
      {
        "ordinal": 0,
        "name": "last_updated_at",
        "type_info": "Timestamptz"
      }
    ],
    "parameters": {
      "Left": []
    },
    "nullable": [
      null
    ]
  },
  "hash": "1681e2e9011c799d91f79b45ab712963093766c6c8b86a0a46fa6e1b3e1dca7b"
}
20  .sqlx/query-19019001661f62a6c91ef13cd5903df954eb6c3316bce5146388d3066fd1d410.json  generated  Normal file
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "select id from tags",
  "describe": {
    "columns": [
      {
        "ordinal": 0,
        "name": "id",
        "type_info": "Int8"
      }
    ],
    "parameters": {
      "Left": []
    },
    "nullable": [
      false
    ]
  },
  "hash": "19019001661f62a6c91ef13cd5903df954eb6c3316bce5146388d3066fd1d410"
}
26  .sqlx/query-2699f6f1991bae9b83566276daa4ed6b0a8984e46b112fc7e4ce17e07d444367.json  generated  Normal file
@@ -0,0 +1,26 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO time_entries (id, workspace_id, user_id, project_id, task_id, start, stop, duration, updated_at, description, billable, server_deleted_at, permissions)\n SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::bigint[], $4::bigint[], $5::bigint[], $6::timestamptz[], $7::timestamptz[], $8::int[], $9::timestamptz[], $10::text[], $11::bool[], $12::timestamptz[], $13::text[])\n ON CONFLICT (id) DO UPDATE SET\n workspace_id = excluded.workspace_id,\n user_id = excluded.user_id,\n project_id = excluded.project_id,\n task_id = excluded.task_id,\n start = excluded.start,\n stop = excluded.stop,\n duration = excluded.duration,\n updated_at = excluded.updated_at,\n description = excluded.description,\n billable = excluded.billable,\n server_deleted_at = excluded.server_deleted_at,\n permissions = excluded.permissions\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Left": [
        "Int8Array",
        "Int8Array",
        "Int8Array",
        "Int8Array",
        "Int8Array",
        "TimestamptzArray",
        "TimestamptzArray",
        "Int4Array",
        "TimestamptzArray",
        "TextArray",
        "BoolArray",
        "TimestamptzArray",
        "TextArray"
      ]
    },
    "nullable": []
  },
  "hash": "2699f6f1991bae9b83566276daa4ed6b0a8984e46b112fc7e4ce17e07d444367"
}
20  .sqlx/query-3c9f3d19e00757a1b6b61b42bf34ea6099b4e4c16be2c345bbc7fe9604b89938.json  generated  Normal file
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "select id from tracking_clients",
  "describe": {
    "columns": [
      {
        "ordinal": 0,
        "name": "id",
        "type_info": "Int8"
      }
    ],
    "parameters": {
      "Left": []
    },
    "nullable": [
      false
    ]
  },
  "hash": "3c9f3d19e00757a1b6b61b42bf34ea6099b4e4c16be2c345bbc7fe9604b89938"
}
16  .sqlx/query-6fe6248362270f0c9c522b3e27436972a4b56f7ea6a4cdb0b7a3e07d969f39de.json  generated  Normal file
@@ -0,0 +1,16 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO workspaces (id, organization_id, name)\n SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::text[])\n ON CONFLICT (id) DO UPDATE SET\n organization_id = excluded.organization_id,\n name = excluded.name\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Left": [
        "Int8Array",
        "Int8Array",
        "TextArray"
      ]
    },
    "nullable": []
  },
  "hash": "6fe6248362270f0c9c522b3e27436972a4b56f7ea6a4cdb0b7a3e07d969f39de"
}
20  .sqlx/query-a6ba6775ce708715abcbf946cb816fdba2b068f1645e7941f54a43a2b6639d31.json  generated  Normal file
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "select id from projects",
  "describe": {
    "columns": [
      {
        "ordinal": 0,
        "name": "id",
        "type_info": "Int8"
      }
    ],
    "parameters": {
      "Left": []
    },
    "nullable": [
      false
    ]
  },
  "hash": "a6ba6775ce708715abcbf946cb816fdba2b068f1645e7941f54a43a2b6639d31"
}
20  .sqlx/query-e76966ebbfd08291c10ce64fb947bfcf0096ae4f16e3f91ffd3f512a9c949c45.json  generated  Normal file
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "select id from workspaces",
  "describe": {
    "columns": [
      {
        "ordinal": 0,
        "name": "id",
        "type_info": "Int8"
      }
    ],
    "parameters": {
      "Left": []
    },
    "nullable": [
      false
    ]
  },
  "hash": "e76966ebbfd08291c10ce64fb947bfcf0096ae4f16e3f91ffd3f512a9c949c45"
}
28  .sqlx/query-eb4f7048dcaa8186c11bc20961e941f02c599088471b3515b39c308f551d6a7e.json  generated  Normal file
@@ -0,0 +1,28 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO projects (id, workspace_id, client_id, name, color, status, active, updated_at, start_date, created_at, server_deleted_at, actual_hours, actual_seconds, can_track_time, permissions)\n SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::bigint[], $4::text[], $5::text[], $6::text[], $7::bool[], $8::timestamptz[], $9::date[], $10::timestamptz[], $11::timestamptz[], $12::int[], $13::int[], $14::bool[], $15::text[])\n ON CONFLICT (id) DO UPDATE SET\n workspace_id = excluded.workspace_id,\n client_id = excluded.client_id,\n name = excluded.name,\n color = excluded.color,\n status = excluded.status,\n active = excluded.active,\n updated_at = excluded.updated_at,\n start_date = excluded.start_date,\n created_at = excluded.created_at,\n server_deleted_at = excluded.server_deleted_at,\n actual_hours = excluded.actual_hours,\n actual_seconds = excluded.actual_seconds,\n can_track_time = excluded.can_track_time,\n permissions = excluded.permissions\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Left": [
        "Int8Array",
        "Int8Array",
        "Int8Array",
        "TextArray",
        "TextArray",
        "TextArray",
        "BoolArray",
        "TimestamptzArray",
        "DateArray",
        "TimestamptzArray",
        "TimestamptzArray",
        "Int4Array",
        "Int4Array",
        "BoolArray",
        "TextArray"
      ]
    },
    "nullable": []
  },
  "hash": "eb4f7048dcaa8186c11bc20961e941f02c599088471b3515b39c308f551d6a7e"
}
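The ten `.sqlx/query-*.json` files above are sqlx's offline-mode query metadata: `cargo sqlx prepare` checks each `sqlx::query!` statement against a live database once and records its parameter and column types in a file named after the SHA-256 of the query (the same value as the `hash` field), so later builds can type-check the macros with no database connection. They also document the batch-upsert pattern this PR uses throughout: parallel arrays unnested into rows, plus `ON CONFLICT ... DO UPDATE`. A minimal sketch of the same workspaces upsert with runtime binding instead of the compile-time macro (the helper name and signature are illustrative, not from the PR):

```rust
use sqlx::PgPool;

/// Upsert many workspaces in one round trip by unnesting parallel arrays.
/// All three slices must be the same length; Postgres zips them row-wise.
async fn upsert_workspaces(
    pool: &PgPool,
    ids: &[i64],
    organization_ids: &[i64],
    names: &[String],
) -> sqlx::Result<()> {
    sqlx::query(
        "INSERT INTO workspaces (id, organization_id, name)
         SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::text[])
         ON CONFLICT (id) DO UPDATE SET
             organization_id = excluded.organization_id,
             name = excluded.name",
    )
    .bind(ids)
    .bind(organization_ids)
    .bind(names)
    .execute(pool)
    .await?;
    Ok(())
}
```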
124  Cargo.lock  generated
@@ -51,6 +51,55 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "anstream"
+version = "0.6.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a"
+dependencies = [
+ "windows-sys 0.52.0",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8"
+dependencies = [
+ "anstyle",
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "anyhow"
 version = "1.0.86"
@@ -246,6 +295,52 @@ dependencies = [
  "windows-targets 0.52.6",
 ]
 
+[[package]]
+name = "clap"
+version = "4.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "35723e6a11662c2afb578bcf0b88bf6ea8e21282a953428f240574fcc3a2b5b3"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49eb96cbfa7cfa35017b7cd548c75b14c3118c98b423041d70562665e07fb0fa"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e"
+dependencies = [
+ "heck 0.5.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.71",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
+
+[[package]]
+name = "colorchoice"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0"
+
 [[package]]
 name = "const-oid"
 version = "0.9.6"
@@ -398,12 +493,6 @@ dependencies = [
  "subtle",
 ]
 
-[[package]]
-name = "dotenv"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
-
 [[package]]
 name = "dotenvy"
 version = "0.15.7"
@@ -716,6 +805,12 @@ dependencies = [
  "unicode-segmentation",
 ]
 
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
 [[package]]
 name = "hermit-abi"
 version = "0.3.9"
@@ -954,6 +1049,12 @@ version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
 
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
 [[package]]
 name = "itertools"
 version = "0.13.0"
@@ -2054,7 +2155,7 @@ checksum = "5833ef53aaa16d860e92123292f1f6a3d53c34ba8b1969f152ef1a7bb803f3c8"
 dependencies = [
  "dotenvy",
  "either",
- "heck",
+ "heck 0.4.1",
  "hex",
  "once_cell",
  "proc-macro2",
@@ -2351,7 +2452,8 @@ dependencies = [
  "axum",
  "base64 0.22.1",
  "chrono",
- "dotenv",
+ "clap",
+ "dotenvy",
  "futures",
  "governor",
  "itertools",
@@ -2607,6 +2709,12 @@ version = "2.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
 
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
 [[package]]
 name = "valuable"
 version = "0.1.0"
Cargo.toml
@@ -20,9 +20,10 @@ serde_json_path_to_error = "0.1.4"
 url = "2.5.2"
 serde_with = "3.9.0"
 sqlx = { version = "0.7.4", features = ["postgres", "runtime-tokio", "macros", "chrono"] }
-dotenv = "0.15.0"
 futures = "0.3.30"
 tracing = "0.1.40"
 tracing-subscriber = "0.3.18"
 itertools = "0.13.0"
 soa-rs = "0.6.1"
+clap = { version = "4.5.11", features = ["derive", "env"] }
+dotenvy = "0.15.7"
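Manifest notes: `dotenv` (unmaintained) is replaced by `dotenvy`, its actively maintained fork with the same `dotenv()` entry point, and `clap` is added with the `derive` and `env` features so each CLI flag declared in `src/main.rs` can also be supplied through an environment variable.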
migrations/…
@@ -1,16 +1,16 @@
 -- Create a new join table to associate time_entries with tags
-CREATE TABLE public.time_entry_tags (
-    time_entry_id bigint not null references public.time_entries(id) on delete cascade,
-    tag_id bigint not null references public.tags(id) on delete cascade,
+CREATE TABLE time_entry_tags (
+    time_entry_id bigint not null references time_entries(id) on delete cascade,
+    tag_id bigint not null references tags(id) on delete cascade,
     primary key (time_entry_id, tag_id)
 );
 
 -- Insert data into the new join table based on the existing time_entries and tags data
-INSERT INTO public.time_entry_tags (time_entry_id, tag_id)
+INSERT INTO time_entry_tags (time_entry_id, tag_id)
 SELECT time_entries.id, UNNEST(time_entries.tag_ids) AS tag_id
-FROM public.time_entries
+FROM time_entries
 WHERE time_entries.tag_ids IS NOT NULL;
 
 -- Remove the tag_ids array column from the time_entries table
-ALTER TABLE public.time_entries
+ALTER TABLE time_entries
 DROP COLUMN tag_ids;
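The migration above normalizes tags out of the `time_entries.tag_ids` array into a join table: tag links gain referential integrity (both foreign keys cascade on delete), and the composite primary key makes per-entry and per-tag lookups indexable. A short sketch of reading tags back through the new table (hypothetical helper, assuming the post-migration schema):

```rust
use sqlx::PgPool;

/// Fetch the tag ids attached to one time entry via the new join table.
async fn tag_ids_for_entry(pool: &PgPool, time_entry_id: i64) -> sqlx::Result<Vec<i64>> {
    sqlx::query_scalar("SELECT tag_id FROM time_entry_tags WHERE time_entry_id = $1")
        .bind(time_entry_id)
        .fetch_all(pool)
        .await
}
```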
418  src/main.rs
@@ -1,27 +1,11 @@
-use crate::toggl::types::{Project, Tag, TimeEntry, TogglReportFilters, TrackingClient};
-use chrono::{DateTime, NaiveDate, TimeDelta, Utc};
-use itertools::Itertools;
-use soa_rs::Soa;
-use sqlx::{Connection, PgConnection};
+use axum::response::IntoResponse;
+use sqlx::{Connection, PgPool};
 use toggl::TogglApi;
+use worker::Worker;
 
-mod sensitive;
+mod server;
 mod toggl;
-
-macro_rules! cast_slice {
-    ($slice: expr, $typ: ty) => {
-        &($slice.iter().map(|id| *id as $typ).collect_vec())[..]
-    };
-}
-
-macro_rules! cast_slice_opts {
-    ($slice: expr, $typ: ty) => {
-        $slice
-            .iter()
-            .map(|opt| opt.map(|id| id as $typ))
-            .collect_vec()
-    };
-}
+mod worker;
 
 #[derive(Debug, thiserror::Error)]
 enum AppError {
@@ -33,11 +17,22 @@ enum AppError {
 
     #[error("User modified since time delta is too large. Max allowed is 90 days.")]
     LookBackTooLarge,
+
+    #[error("IO error: {0}")]
+    IO(#[from] std::io::Error),
 }
 
-struct Worker {
-    db: PgConnection,
-    toggl_api: TogglApi,
+impl IntoResponse for AppError {
+    fn into_response(self) -> axum::http::Response<axum::body::Body> {
+        let status = match &self {
+            AppError::SqlxError(_) => axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+            AppError::TogglError(_) => axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+            AppError::LookBackTooLarge => axum::http::StatusCode::BAD_REQUEST,
+            AppError::IO(_) => axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+        };
+
+        (status, self.to_string()).into_response()
+    }
 }
 
 struct TableSummary {
@@ -47,354 +42,69 @@ struct TableSummary {
     tag_ids: Vec<u64>,
 }
 
-impl Worker {
-    async fn get_ids(&mut self) -> Result<TableSummary, AppError> {
-        let client_ids = sqlx::query!("select id from tracking_clients")
-            .fetch_all(&mut self.db)
-            .await?;
-
-        let workspace_ids = sqlx::query!("select id from workspaces")
-            .fetch_all(&mut self.db)
-            .await?;
-
-        let project_ids = sqlx::query!("select id from projects")
-            .fetch_all(&mut self.db)
-            .await?;
-
-        let tag_ids = sqlx::query!("select id from tags")
-            .fetch_all(&mut self.db)
-            .await?;
-
-        Ok(TableSummary {
-            client_ids: client_ids.iter().map(|row| row.id as u64).collect(),
-            workspace_ids: workspace_ids.iter().map(|row| row.id as u64).collect(),
-            project_ids: project_ids.iter().map(|row| row.id as u64).collect(),
-            tag_ids: tag_ids.iter().map(|row| row.id as u64).collect(),
-        })
-    }
-
-    pub async fn fetch_within(
-        &mut self,
-        start: NaiveDate,
-        end: NaiveDate,
-    ) -> Result<(), AppError> {
-        let results = self
-            .toggl_api
-            .search(
-                self.toggl_api.workspace_id,
-                TogglReportFilters {
-                    start_date: Some(start),
-                    end_date: Some(end),
-                    ..Default::default()
-                },
-            )
-            .await?;
-
-        let time_entries = results
-            .into_iter()
-            .map(|entry| entry.into_time_entry(self.toggl_api.workspace_id))
-            .collect::<Vec<_>>();
-
-        self.update_database(time_entries).await
-    }
-
-    pub async fn fetch_changed_since(&mut self, look_back: TimeDelta) -> Result<(), AppError> {
-        if look_back > TimeDelta::days(90) {
-            return Err(AppError::LookBackTooLarge);
-        }
-
-        self.update_time_entries(Utc::now() - look_back).await
-    }
-
-    pub async fn update(&mut self, default_look_back: TimeDelta) -> Result<(), AppError> {
-        let result = sqlx::query!("select max(updated_at) as last_updated_at from time_entries")
-            .fetch_optional(&mut self.db)
-            .await
-            .expect("Could not fetch max updated_at from time_entries");
-
-        let fetch_since = result
-            .and_then(|record| record.last_updated_at)
-            .unwrap_or_else(|| Utc::now() - default_look_back);
-
-        self.update_time_entries(fetch_since).await
-    }
-
-    async fn update_time_entries(&mut self, fetch_since: DateTime<Utc>) -> Result<(), AppError> {
-        let time_entries = self
-            .toggl_api
-            .get_time_entries_for_user_modified_since(fetch_since)
-            .await?;
-
-        self.update_database(time_entries).await
-    }
-
-    async fn update_database(&mut self, time_entries: Vec<TimeEntry>) -> Result<(), AppError> {
-        let existing_ids = self.get_ids().await?;
-
-        let fetch_workspaces = time_entries
-            .iter()
-            .map(|entry| entry.workspace_id)
-            .filter(|workspace_id| !existing_ids.workspace_ids.contains(&workspace_id))
-            .collect::<Vec<_>>();
-
-        let fetch_projects = time_entries
-            .iter()
-            .map(|entry| entry.project_id)
-            .filter_map(|project_id| project_id)
-            .any(|project_id| !existing_ids.project_ids.contains(&project_id));
-
-        let fetch_tags = time_entries
-            .iter()
-            .flat_map(|entry| entry.tag_ids.iter())
-            .any(|tag| !existing_ids.tag_ids.contains(&tag));
-
-        if !fetch_workspaces.is_empty() {
-            self.update_workspaces(&fetch_workspaces).await?;
-        }
-
-        if fetch_projects {
-            self.update_projects(&existing_ids).await?;
-        }
-
-        if fetch_tags {
-            self.update_tags().await?;
-        }
-
-        for time_entries_chunk in time_entries.chunks(100) {
-            self.update_time_entries_chunk(time_entries_chunk).await?;
-        }
-
-        Ok(())
-    }
-
-    async fn update_time_entries_chunk(
-        &mut self,
-        time_entries: &[TimeEntry],
-    ) -> Result<(), AppError> {
-        let time_entries = Soa::from(time_entries);
-
-        sqlx::query!(
-            r#"
-            INSERT INTO time_entries (id, workspace_id, user_id, project_id, task_id, start, stop, duration, updated_at, description, billable, server_deleted_at, permissions)
-            SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::bigint[], $4::bigint[], $5::bigint[], $6::timestamptz[], $7::timestamptz[], $8::int[], $9::timestamptz[], $10::text[], $11::bool[], $12::timestamptz[], $13::text[])
-            ON CONFLICT (id) DO UPDATE SET
-                workspace_id = excluded.workspace_id,
-                user_id = excluded.user_id,
-                project_id = excluded.project_id,
-                task_id = excluded.task_id,
-                start = excluded.start,
-                stop = excluded.stop,
-                duration = excluded.duration,
-                updated_at = excluded.updated_at,
-                description = excluded.description,
-                billable = excluded.billable,
-                server_deleted_at = excluded.server_deleted_at,
-                permissions = excluded.permissions
-            "#,
-            cast_slice!(time_entries.id(), i64),
-            cast_slice!(time_entries.workspace_id(), i64),
-            cast_slice!(time_entries.user_id(), i64),
-            cast_slice_opts!(time_entries.project_id(), i64) as _,
-            cast_slice_opts!(time_entries.task_id(), i64) as _,
-            time_entries.start(),
-            time_entries.stop() as _,
-            cast_slice_opts!(time_entries.duration(), i32) as _,
-            time_entries.updated_at(),
-            time_entries.description() as _,
-            time_entries.billable(),
-            time_entries.server_deleted_at() as _,
-            time_entries.permissions() as _,
-        )
-        .execute(&mut self.db)
-        .await?;
-
-        Ok(())
-    }
-
-    async fn update_workspaces(&mut self, workspace_ids: &[u64]) -> Result<(), AppError> {
-        let workspaces = workspace_ids
-            .iter()
-            .map(|id| self.toggl_api.get_workspace(*id));
-
-        let workspaces = futures::future::join_all(workspaces)
-            .await
-            .into_iter()
-            .collect::<Result<Vec<_>, _>>()?;
-
-        let workspaces = Soa::from(workspaces.as_slice());
-
-        sqlx::query!(
-            r#"
-            INSERT INTO workspaces (id, organization_id, name)
-            SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::text[])
-            ON CONFLICT (id) DO UPDATE SET
-                organization_id = excluded.organization_id,
-                name = excluded.name
-            "#,
-            cast_slice!(workspaces.id(), i64),
-            cast_slice!(workspaces.organization_id(), i64),
-            workspaces.name(),
-        )
-        .execute(&mut self.db)
-        .await?;
-
-        Ok(())
-    }
-
-    async fn update_projects(&mut self, existing_ids: &TableSummary) -> Result<(), AppError> {
-        let projects = self.toggl_api.get_projects().await?;
-
-        let fetch_clients = projects
-            .iter()
-            .map(|project| project.client_id)
-            .filter_map(|client_id| client_id)
-            .any(|client_id| !existing_ids.client_ids.contains(&(client_id as u64)));
-
-        if fetch_clients {
-            self.update_clients().await?;
-        }
-
-        let projects: Soa<Project> = Soa::from(projects.as_slice());
-
-        sqlx::query!(
-            r#"
-            INSERT INTO projects (id, workspace_id, client_id, name, color, status, active, updated_at, start_date, created_at, server_deleted_at, actual_hours, actual_seconds, can_track_time, permissions)
-            SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::bigint[], $4::text[], $5::text[], $6::text[], $7::bool[], $8::timestamptz[], $9::date[], $10::timestamptz[], $11::timestamptz[], $12::int[], $13::int[], $14::bool[], $15::text[])
-            ON CONFLICT (id) DO UPDATE SET
-                workspace_id = excluded.workspace_id,
-                client_id = excluded.client_id,
-                name = excluded.name,
-                color = excluded.color,
-                status = excluded.status,
-                active = excluded.active,
-                updated_at = excluded.updated_at,
-                start_date = excluded.start_date,
-                created_at = excluded.created_at,
-                server_deleted_at = excluded.server_deleted_at,
-                actual_hours = excluded.actual_hours,
-                actual_seconds = excluded.actual_seconds,
-                can_track_time = excluded.can_track_time,
-                permissions = excluded.permissions
-            "#,
-            cast_slice!(projects.id(), i64),
-            cast_slice!(projects.workspace_id(), i64),
-            cast_slice_opts!(projects.client_id(), i64) as _,
-            projects.name(),
-            projects.color(),
-            &projects.status().iter().map(|s| s.to_string()).collect::<Vec<_>>()[..],
-            projects.active(),
-            projects.updated_at(),
-            projects.start_date() as _,
-            projects.created_at(),
-            projects.server_deleted_at() as _,
-            cast_slice_opts!(projects.actual_hours(), i64) as _,
-            cast_slice_opts!(projects.actual_seconds(), i64) as _,
-            projects.can_track_time(),
-            projects.permissions() as _,
-        )
-        .execute(&mut self.db)
-        .await?;
-
-        Ok(())
-    }
-
-    async fn update_tags(&mut self) -> Result<(), AppError> {
-        let tags = self.toggl_api.get_tags().await?;
-        let tags: Soa<Tag> = Soa::from(tags.as_slice());
-
-        sqlx::query!(
-            r#"
-            INSERT INTO tags (id, name, workspace_id, creator_id, updated_at, deleted_at, permissions)
-            SELECT * FROM UNNEST($1::bigint[], $2::text[], $3::bigint[], $4::bigint[], $5::timestamptz[], $6::timestamptz[], $7::text[])
-            ON CONFLICT (id) DO UPDATE SET
-                name = excluded.name,
-                workspace_id = excluded.workspace_id,
-                creator_id = excluded.creator_id,
-                updated_at = excluded.updated_at,
-                deleted_at = excluded.deleted_at,
-                permissions = excluded.permissions
-            "#,
-            cast_slice!(tags.id(), i64),
-            tags.name(),
-            cast_slice!(tags.workspace_id(), i64),
-            cast_slice!(tags.creator_id(), i64),
-            tags.updated_at(),
-            // Nullable fields fail to type check with UNNEST batch inserts so we silence the
-            // errors using ` as _`.
-            tags.deleted_at() as _,
-            tags.permissions() as _,
-        )
-        .execute(&mut self.db)
-        .await?;
-
-        Ok(())
-    }
-
-    async fn update_clients(&mut self) -> Result<(), AppError> {
-        let clients = self.toggl_api.get_clients().await?;
-
-        let clients: Soa<TrackingClient> = Soa::from(clients.as_slice());
-
-        sqlx::query!(r#"
-            INSERT INTO tracking_clients
-                (id, updated_at, archived, creator_id, integration_provider, notes, name, server_deleted_at, workspace_id, permissions)
-            SELECT * FROM UNNEST($1::bigint[], $2::timestamptz[], $3::bool[], $4::bigint[], $5::text[], $6::text[], $7::text[], $8::timestamptz[], $9::bigint[], $10::text[])
-            ON CONFLICT (id) DO UPDATE SET
-                updated_at = excluded.updated_at,
-                archived = excluded.archived,
-                creator_id = excluded.creator_id,
-                integration_provider = excluded.integration_provider,
-                notes = excluded.notes,
-                name = excluded.name,
-                server_deleted_at = excluded.server_deleted_at,
-                workspace_id = excluded.workspace_id,
-                permissions = excluded.permissions
-            "#,
-            cast_slice!(clients.id(), i64),
-            clients.updated_at(),
-            clients.archived(),
-            cast_slice!(clients.creator_id(), i64),
-            // For the next two, we are assuming these are Option<String> as datatype. If these are different, please update accordingly.
-            clients.integration_provider() as _,
-            clients.notes() as _,
-            clients.name(),
-            clients.server_deleted_at() as _,
-            cast_slice!(clients.workspace_id(), i64),
-            clients.permissions() as _
-        )
-        .execute(&mut self.db)
-        .await?;
-
-        Ok(())
-    }
-}
+use clap::{Parser, Subcommand};
+use std::net::IpAddr;
+use chrono::TimeDelta;
+
+#[derive(Parser)]
+#[command(author, version, about, long_about = None)]
+struct Cli {
+    #[command(subcommand)]
+    command: Commands,
+
+    #[arg(long, env = "DATABASE_URL")]
+    database_url: String,
+
+    #[arg(long, env = "API_TOKEN")]
+    api_token: String,
+
+    #[arg(long, env = "DEFAULT_WORKSPACE_ID")]
+    default_workspace_id: u64,
+}
+
+#[derive(Subcommand)]
+enum Commands {
+    Server {
+        #[arg(long, env = "IP", default_value = "127.0.0.1")]
+        ip: IpAddr,
+
+        #[arg(long, env = "PORT", default_value = "3000")]
+        port: u16,
+    },
+
+    Migrate,
+
+    Sync,
+}
 
 #[tokio::main]
 async fn main() {
-    dotenv::dotenv().expect("Failed to load .env file");
+    dotenvy::dotenv().expect("Failed to load .env file");
 
     // Init tracing
     tracing_subscriber::fmt::init();
+    let cli = Cli::parse();
 
-    let api = TogglApi::new(sensitive::API_TOKEN, sensitive::WORKSPACE_ID);
-
-    let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
-
-    let mut worker = Worker {
-        db: PgConnection::connect(&database_url).await.unwrap(),
-        toggl_api: api,
-    };
-
-    worker.update_tags().await.unwrap();
-
-    let start = NaiveDate::from_ymd_opt(2024, 2, 1)
-        .expect("Invalid date");
-
-    let end = NaiveDate::from_ymd_opt(2024, 2, 2)
-        .expect("Invalid date");
-
-    worker
-        .fetch_within(start, end)
-        .await
-        .expect("Failed to fetch time entries");
+    let toggl_api = TogglApi::new(&cli.api_token, cli.default_workspace_id);
+    let mut db = PgPool::connect(&cli.database_url).await.unwrap();
+
+    sqlx::migrate!("./migrations")
+        .run(&db)
+        .await
+        .expect("Failed to run migrations");
+
+    // Return early if we are just migrating
+    if let Commands::Migrate = cli.command {
+        return;
+    }
+
+    let worker = Worker { db, toggl_api };
+
+    if let Commands::Server { ip, port } = cli.command {
+        server::serve(worker, ip, port).await.expect("Failed to start server");
+    } else {
+        worker.update(TimeDelta::days(30))
+            .await.expect("Failed to update worker");
+    }
 }
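Two notes on the new `main`: `sqlx::migrate!("./migrations")` embeds the migration files into the binary at compile time, so the `Migrate` subcommand needs nothing on disk at runtime, and because migrations run before the early return, the `Server` and `Sync` paths always start from an up-to-date schema.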
38  src/server.rs  Normal file
@@ -0,0 +1,38 @@
use std::net::IpAddr;
use axum::response::IntoResponse;
use axum::{
    http::StatusCode,
    routing::{get, post},
    Extension, Json, Router,
};
use chrono::TimeDelta;

use crate::worker::Worker;
use crate::AppError;

async fn sync(Extension(worker): Extension<Worker>) -> Result<impl IntoResponse, AppError> {
    worker.update(TimeDelta::days(30)).await?;

    Ok("Ok")
}

pub async fn serve(worker: Worker, ip: IpAddr, port: u16) -> Result<(), AppError> {
    // build our application with a route
    let app = Router::new()
        .route("/health", get(|| async { "Ok" }))
        .route("/sync", post(sync))
        .layer(Extension(worker));

    // run our app with hyper, listening globally on port 3000
    let listener = tokio::net::TcpListener::bind((ip, port)).await?;

    axum::serve(listener, app)
        .with_graceful_shutdown(async {
            tokio::signal::ctrl_c()
                .await
                .expect("Failed to install CTRL+C signal handler");
        })
        .await?;

    Ok(())
}
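Sharing `Worker` through axum's `Extension` layer requires `Worker: Clone`, since the value is cloned into each request; that is presumably why this PR swaps the database handle from `PgConnection` to `PgPool` — a pool handle is a cheap, `Arc`-backed clone — and adds `Clone` to `TogglApi`'s derives in `src/toggl/mod.rs`.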
382  src/toggl/mod.rs
@@ -1,42 +1,19 @@
-use axum::async_trait;
 use base64::engine::general_purpose::STANDARD;
 use base64::Engine;
 use chrono::{DateTime, SecondsFormat, Utc};
-use governor::clock::DefaultClock;
-use governor::state::{InMemoryState, NotKeyed};
 use reqwest::header::{HeaderMap, HeaderValue};
 use reqwest::Response;
 use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
 use reqwest_retry::policies::ExponentialBackoff;
 use reqwest_retry::{Jitter, RetryTransientMiddleware};
 use serde::de::DeserializeOwned;
-use std::num::NonZero;
 use std::time::Duration;
+use support::ReqwestRateLimiter;
 
-struct ReqwestRateLimiter {
-    rate_limiter: governor::RateLimiter<NotKeyed, InMemoryState, DefaultClock>,
-}
-
-impl ReqwestRateLimiter {
-    fn new() -> Self {
-        Self {
-            rate_limiter: governor::RateLimiter::direct(governor::Quota::per_second(
-                NonZero::new(1u32).unwrap(),
-            )),
-        }
-    }
-}
-
-#[async_trait]
-impl reqwest_ratelimit::RateLimiter for ReqwestRateLimiter {
-    async fn acquire_permit(&self) {
-        // We don't need to introduce jitter here as that is handled by the retry_request
-        // middleware.
-        self.rate_limiter.until_ready().await;
-    }
-}
-
-#[derive(Clone)]
+mod support;
+pub mod types;
+
+#[derive(Debug, Clone)]
 pub struct TogglApi {
     client: ClientWithMiddleware,
     pub workspace_id: u64,
@@ -288,357 +265,6 @@ impl TogglApi {
     }
 }
 
-pub mod types {
-    use chrono::{DateTime, NaiveDate, Utc};
-    use serde::{Deserialize, Serialize};
-    use serde_with::skip_serializing_none;
-    use std::collections::HashMap;
-
-    #[derive(Serialize, Deserialize, Debug, Clone, Soars)]
-    pub struct TimeEntry {
-        pub id: u64,
-        pub workspace_id: u64,
-        pub user_id: u64,
-        pub project_id: Option<u64>,
-        pub task_id: Option<u64>,
-
-        pub start: DateTime<Utc>,
-        pub stop: Option<DateTime<Utc>>,
-
-        #[serde(with = "duration_field")]
-        pub duration: Option<u32>,
-
-        #[serde(rename = "at")]
-        pub updated_at: DateTime<Utc>,
-
-        pub description: Option<String>,
-
-        #[serde(default)]
-        pub tags: Vec<String>,
-
-        #[serde(default)]
-        pub tag_ids: Vec<u64>,
-
-        pub billable: bool,
-        pub server_deleted_at: Option<DateTime<Utc>>,
-        pub permissions: Option<String>,
-    }
-
-    mod duration_field {
-        use serde::{Deserialize, Serialize};
-
-        pub fn serialize<S>(duration: &Option<u32>, serializer: S) -> Result<S::Ok, S::Error>
-        where
-            S: serde::Serializer,
-        {
-            match duration {
-                None => i32::serialize(&-1, serializer),
-                Some(duration) => i32::serialize(&(*duration as i32), serializer),
-            }
-        }
-
-        pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<u32>, D::Error>
-        where
-            D: serde::Deserializer<'de>,
-        {
-            let duration = i32::deserialize(deserializer)?;
-            if duration < 0 {
-                Ok(None)
-            } else {
-                Ok(Some(duration as u32))
-            }
-        }
-    }
-
-    #[derive(Serialize, Deserialize, Debug, Clone, Soars)]
-    pub struct Project {
-        pub id: i64,
-        pub workspace_id: i64,
-        pub client_id: Option<i64>,
-
-        pub name: String,
-        pub color: String,
-        pub status: ProjectStatus,
-        pub active: bool,
-
-        #[serde(rename = "at")]
-        pub updated_at: DateTime<Utc>,
-        pub start_date: NaiveDate,
-        pub created_at: DateTime<Utc>,
-        pub server_deleted_at: Option<DateTime<Utc>>,
-
-        pub actual_hours: Option<i64>,
-        pub actual_seconds: Option<i64>,
-        pub can_track_time: bool,
-        pub permissions: Option<String>,
-    }
-
-    #[derive(Serialize, Deserialize, Debug, Clone)]
-    #[serde(rename_all = "lowercase")]
-    pub enum ProjectStatus {
-        Upcoming,
-        Active,
-        Archived,
-        Ended,
-        Deleted,
-    }
-
-    impl fmt::Display for ProjectStatus {
-        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-            write!(
-                f,
-                "{}",
-                match self {
-                    ProjectStatus::Upcoming => "upcoming",
-                    ProjectStatus::Active => "active",
-                    ProjectStatus::Archived => "archived",
-                    ProjectStatus::Ended => "ended",
-                    ProjectStatus::Deleted => "deleted",
-                }
-            )
-        }
-    }
-
-    #[derive(Serialize, Deserialize, Debug, Clone, Soars)]
-    pub struct TrackingClient {
-        /// The unique identifier for the client.
-        pub id: i64,
-
-        /// Represents the timestamp of the last update made to the client.
-        #[serde(rename = "at")]
-        pub updated_at: DateTime<Utc>,
-
-        /// Indicates whether the client is archived or not.
-        pub archived: bool,
-
-        pub creator_id: i64,
-        pub integration_provider: Option<String>,
-        pub notes: Option<String>,
-
-        /// The name of the client.
-        pub name: String,
-
-        /// Indicates the timestamp when the client was deleted. If the client is not deleted, this property will be null.
-        pub server_deleted_at: Option<DateTime<Utc>>,
-
-        /// The Workspace ID associated with the client.
-        #[serde(rename = "wid")]
-        pub workspace_id: i64,
-
-        pub permissions: Option<String>,
-    }
-
-    #[derive(Soars, Serialize, Deserialize, Debug, Clone)]
-    pub struct Tag {
-        pub id: u64,
-        pub name: String,
-        pub workspace_id: u64,
-        pub creator_id: u64,
-        #[serde(rename = "at")]
-        pub updated_at: DateTime<Utc>,
-        pub deleted_at: Option<DateTime<Utc>>,
-        pub permissions: Option<String>,
-    }
-
-    #[derive(Serialize, Deserialize, Debug, Clone)]
-    pub struct ReportEntry {
-        pub user_id: u64,
-        pub username: String,
-        pub project_id: Option<u64>,
-        pub task_id: Option<u64>,
-        pub billable: bool,
-        pub description: Option<String>,
-        pub tag_ids: Vec<u64>,
-        pub billable_amount_in_cents: Option<u64>,
-        pub hourly_rate_in_cents: Option<u64>,
-        pub currency: String,
-        pub time_entries: Vec<ReportEntryTimeDetails>,
-        pub row_number: u32,
-
-        #[serde(flatten)]
-        pub enriched_information: Option<ReportEntryEnrichedInfo>,
-
-        #[serde(flatten)]
-        pub rest: HashMap<String, serde_json::Value>,
-    }
-
-    #[derive(Serialize, Deserialize, Debug, Clone)]
-    pub struct ReportEntryEnrichedInfo {
-        pub project_id: Option<u64>,
-        pub project_name: Option<String>,
-        pub project_hex: Option<String>,
-
-        pub tag_names: Vec<String>,
-    }
-
-    #[derive(Clone, Serialize, Deserialize, Debug)]
-    pub struct ReportEntryTimeDetails {
-        pub id: u64,
-        pub seconds: u32,
-        pub start: DateTime<Utc>,
-        pub stop: DateTime<Utc>,
-        #[serde(rename = "at")]
-        pub updated_at: DateTime<Utc>,
-    }
-
-    impl ReportEntry {
-        pub fn into_time_entry(self, workspace_id: u64) -> TimeEntry {
-            TimeEntry {
-                id: self.time_entries[0].id,
-                workspace_id,
-                user_id: self.user_id,
-                project_id: self.project_id,
-                task_id: self.task_id,
-                start: self.time_entries[0].start,
-                stop: Some(self.time_entries[0].stop),
-                duration: Some(self.time_entries[0].seconds),
-                updated_at: self.time_entries[0].updated_at,
-                description: self.description,
-                tags: self
-                    .enriched_information
-                    .map(|e| e.tag_names.clone())
-                    .unwrap_or_default(),
-
-                tag_ids: self.tag_ids,
-                billable: self.billable,
-                server_deleted_at: None,
-                permissions: None,
-            }
-        }
-    }
-
-    #[skip_serializing_none]
-    #[derive(Serialize, Deserialize, Clone, Default)]
-    pub struct TogglReportFilters {
-        pub billable: Option<bool>,
-        pub client_ids: Option<Vec<u64>>,
-        pub description: Option<String>,
-        pub end_date: Option<NaiveDate>,
-        pub enrich_response: Option<bool>,
-        pub first_id: Option<u64>,
-        pub first_row_number: Option<u64>,
-        pub first_timestamp: Option<u64>,
-        pub group_ids: Option<Vec<u64>>,
-        pub grouped: Option<bool>,
-        pub hide_amounts: Option<bool>,
-        pub max_duration_seconds: Option<u64>,
-        pub min_duration_seconds: Option<u64>,
-        pub order_by: Option<String>,
-        pub order_dir: Option<String>,
-        pub page_size: Option<u64>,
-        #[serde(rename = "postedFields")]
-        pub posted_fields: Option<Vec<String>>,
-        pub project_ids: Option<Vec<u64>>,
-        pub rounding: Option<u64>,
-        pub rounding_minutes: Option<u64>,
-        #[serde(rename = "startTime")]
-        pub start_time: Option<String>,
-        pub start_date: Option<NaiveDate>,
-        pub tag_ids: Option<Vec<u64>>,
-        pub task_ids: Option<Vec<u64>>,
-        pub time_entry_ids: Option<Vec<u64>>,
-        pub user_ids: Option<Vec<u64>>,
-
-        #[serde(flatten)]
-        pub rest: HashMap<String, serde_json::Value>,
-    }
-
-    use soa_rs::Soars;
-    use std::fmt;
-
-    impl fmt::Debug for TogglReportFilters {
-        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-            let mut ds = f.debug_struct("TogglQuery");
-
-            if let Some(billable) = &self.billable {
-                ds.field("billable", billable);
-            }
-            if let Some(client_ids) = &self.client_ids {
-                ds.field("client_ids", client_ids);
-            }
-            if let Some(description) = &self.description {
-                ds.field("description", description);
-            }
-            if let Some(end_date) = &self.end_date {
-                ds.field("end_date", end_date);
-            }
-            if let Some(first_id) = &self.first_id {
-                ds.field("first_id", first_id);
-            }
-            if let Some(first_row_number) = &self.first_row_number {
-                ds.field("first_row_number", first_row_number);
-            }
-            if let Some(first_timestamp) = &self.first_timestamp {
-                ds.field("first_timestamp", first_timestamp);
-            }
-            if let Some(group_ids) = &self.group_ids {
-                ds.field("group_ids", group_ids);
-            }
-            if let Some(grouped) = &self.grouped {
-                ds.field("grouped", grouped);
-            }
-            if let Some(hide_amounts) = &self.hide_amounts {
-                ds.field("hide_amounts", hide_amounts);
-            }
-            if let Some(max_duration_seconds) = &self.max_duration_seconds {
-                ds.field("max_duration_seconds", max_duration_seconds);
-            }
-            if let Some(min_duration_seconds) = &self.min_duration_seconds {
-                ds.field("min_duration_seconds", min_duration_seconds);
-            }
-            if let Some(order_by) = &self.order_by {
-                ds.field("order_by", order_by);
-            }
-            if let Some(order_dir) = &self.order_dir {
-                ds.field("order_dir", order_dir);
-            }
-            if let Some(posted_fields) = &self.posted_fields {
-                ds.field("postedFields", posted_fields);
-            }
-            if let Some(project_ids) = &self.project_ids {
-                ds.field("project_ids", project_ids);
-            }
-            if let Some(rounding) = &self.rounding {
-                ds.field("rounding", rounding);
-            }
-            if let Some(rounding_minutes) = &self.rounding_minutes {
-                ds.field("rounding_minutes", rounding_minutes);
-            }
-            if let Some(start_time) = &self.start_time {
-                ds.field("startTime", start_time);
-            }
-            if let Some(start_date) = &self.start_date {
-                ds.field("start_date", start_date);
-            }
-            if let Some(tag_ids) = &self.tag_ids {
-                ds.field("tag_ids", tag_ids);
-            }
-            if let Some(task_ids) = &self.task_ids {
-                ds.field("task_ids", task_ids);
-            }
-            if let Some(time_entry_ids) = &self.time_entry_ids {
-                ds.field("time_entry_ids", time_entry_ids);
-            }
-            if let Some(user_ids) = &self.user_ids {
-                ds.field("user_ids", user_ids);
-            }
-
-            if !self.rest.is_empty() {
-                ds.field("rest", &self.rest);
-            }
-
-            ds.finish()
-        }
-    }
-
-    #[derive(Serialize, Deserialize, Debug, Clone, Soars)]
-    pub struct Workspace {
-        pub id: u64,
-        pub organization_id: u64,
-        pub name: String,
-    }
-}
 
 #[derive(Debug, thiserror::Error)]
 pub enum TogglError {
     #[error("Toggl returned error: {0}")]
27  src/toggl/support.rs  Normal file
@@ -0,0 +1,27 @@
use axum::async_trait;
use governor::clock::DefaultClock;
use governor::state::{InMemoryState, NotKeyed};
use std::num::NonZero;

pub struct ReqwestRateLimiter {
    rate_limiter: governor::RateLimiter<NotKeyed, InMemoryState, DefaultClock>,
}

impl ReqwestRateLimiter {
    pub fn new() -> Self {
        Self {
            rate_limiter: governor::RateLimiter::direct(governor::Quota::per_second(
                NonZero::new(1u32).unwrap(),
            )),
        }
    }
}

#[async_trait]
impl reqwest_ratelimit::RateLimiter for ReqwestRateLimiter {
    async fn acquire_permit(&self) {
        // We don't need to introduce jitter here as that is handled by the retry_request
        // middleware.
        self.rate_limiter.until_ready().await;
    }
}
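`ReqwestRateLimiter` adapts a `governor` direct (un-keyed) limiter, minted at one permit per second, to the `reqwest_ratelimit::RateLimiter` trait; `until_ready()` parks the caller until the next permit matures, so concurrent requests queue rather than fail, and jitter is deliberately left to the retry middleware. A wiring sketch — note that `reqwest_ratelimit::all` is an assumption about that crate's constructor, and the PR's own client construction is not visible in these hunks:

```rust
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};

// Sketch only: `reqwest_ratelimit::all` is assumed from that crate's API and
// may differ; the actual wiring inside TogglApi is not shown in this diff.
fn build_rate_limited_client() -> ClientWithMiddleware {
    ClientBuilder::new(reqwest::Client::new())
        .with(reqwest_ratelimit::all(ReqwestRateLimiter::new()))
        .build()
}
```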
348
src/toggl/types.rs
Normal file
348
src/toggl/types.rs
Normal file
@ -0,0 +1,348 @@
use chrono::{DateTime, NaiveDate, Utc};
use serde::{Deserialize, Serialize};
use serde_with::skip_serializing_none;
use soa_rs::Soars;
use std::collections::HashMap;
use std::fmt;

#[derive(Serialize, Deserialize, Debug, Clone, Soars)]
pub struct TimeEntry {
    pub id: u64,
    pub workspace_id: u64,
    pub user_id: u64,
    pub project_id: Option<u64>,
    pub task_id: Option<u64>,

    pub start: DateTime<Utc>,
    pub stop: Option<DateTime<Utc>>,

    #[serde(with = "duration_field")]
    pub duration: Option<u32>,

    #[serde(rename = "at")]
    pub updated_at: DateTime<Utc>,

    pub description: Option<String>,

    #[serde(default)]
    pub tags: Vec<String>,

    #[serde(default)]
    pub tag_ids: Vec<u64>,

    pub billable: bool,
    pub server_deleted_at: Option<DateTime<Utc>>,
    pub permissions: Option<String>,
}
mod duration_field {
    use serde::{Deserialize, Serialize};

    pub fn serialize<S>(duration: &Option<u32>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match duration {
            None => i32::serialize(&-1, serializer),
            Some(duration) => i32::serialize(&(*duration as i32), serializer),
        }
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<u32>, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let duration = i32::deserialize(deserializer)?;
        if duration < 0 {
            Ok(None)
        } else {
            Ok(Some(duration as u32))
        }
    }
}
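The sentinel convention above (Toggl reports a running entry's duration as a negative number; we serialize None back out as -1) is easy to pin down with a round-trip check. A sketch, assuming a test module alongside duration_field in types.rs:

#[cfg(test)]
mod duration_field_tests {
    use super::duration_field;

    #[derive(serde::Serialize, serde::Deserialize)]
    struct Wrapper {
        #[serde(with = "duration_field")]
        duration: Option<u32>,
    }

    #[test]
    fn running_entry_round_trips_as_minus_one() {
        // None (a running entry) serializes to the -1 sentinel...
        let json = serde_json::to_string(&Wrapper { duration: None }).unwrap();
        assert_eq!(json, r#"{"duration":-1}"#);

        // ...and any negative duration deserializes back to None.
        let parsed: Wrapper = serde_json::from_str(r#"{"duration":-1}"#).unwrap();
        assert_eq!(parsed.duration, None);

        let finished: Wrapper = serde_json::from_str(r#"{"duration":90}"#).unwrap();
        assert_eq!(finished.duration, Some(90));
    }
}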
#[derive(Serialize, Deserialize, Debug, Clone, Soars)]
pub struct Project {
    pub id: i64,
    pub workspace_id: i64,
    pub client_id: Option<i64>,

    pub name: String,
    pub color: String,
    pub status: ProjectStatus,
    pub active: bool,

    #[serde(rename = "at")]
    pub updated_at: DateTime<Utc>,
    pub start_date: NaiveDate,
    pub created_at: DateTime<Utc>,
    pub server_deleted_at: Option<DateTime<Utc>>,

    pub actual_hours: Option<i64>,
    pub actual_seconds: Option<i64>,
    pub can_track_time: bool,
    pub permissions: Option<String>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "lowercase")]
pub enum ProjectStatus {
    Upcoming,
    Active,
    Archived,
    Ended,
    Deleted,
}
impl fmt::Display for ProjectStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}",
            match self {
                ProjectStatus::Upcoming => "upcoming",
                ProjectStatus::Active => "active",
                ProjectStatus::Archived => "archived",
                ProjectStatus::Ended => "ended",
                ProjectStatus::Deleted => "deleted",
            }
        )
    }
}
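One invariant worth keeping in mind: these Display strings match the rename_all = "lowercase" serde encoding, which is what lets the worker below store status.to_string() directly in the text status column. A minimal check (an assumed test, not part of the diff):

#[test]
fn project_status_display_matches_serde_encoding() {
    let status = ProjectStatus::Archived;
    assert_eq!(status.to_string(), "archived");
    assert_eq!(serde_json::to_string(&status).unwrap(), "\"archived\"");
}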
#[derive(Serialize, Deserialize, Debug, Clone, Soars)]
pub struct TrackingClient {
    /// The unique identifier for the client.
    pub id: i64,

    /// The timestamp of the last update made to the client.
    #[serde(rename = "at")]
    pub updated_at: DateTime<Utc>,

    /// Whether the client is archived.
    pub archived: bool,

    pub creator_id: i64,
    pub integration_provider: Option<String>,
    pub notes: Option<String>,

    /// The name of the client.
    pub name: String,

    /// The timestamp when the client was deleted; null if the client is not deleted.
    pub server_deleted_at: Option<DateTime<Utc>>,

    /// The workspace ID associated with the client.
    #[serde(rename = "wid")]
    pub workspace_id: i64,

    pub permissions: Option<String>,
}
#[derive(Soars, Serialize, Deserialize, Debug, Clone)]
pub struct Tag {
    pub id: u64,
    pub name: String,
    pub workspace_id: u64,
    pub creator_id: u64,
    #[serde(rename = "at")]
    pub updated_at: DateTime<Utc>,
    pub deleted_at: Option<DateTime<Utc>>,
    pub permissions: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ReportEntry {
    pub user_id: u64,
    pub username: String,
    pub project_id: Option<u64>,
    pub task_id: Option<u64>,
    pub billable: bool,
    pub description: Option<String>,
    pub tag_ids: Vec<u64>,
    pub billable_amount_in_cents: Option<u64>,
    pub hourly_rate_in_cents: Option<u64>,
    pub currency: String,
    pub time_entries: Vec<ReportEntryTimeDetails>,
    pub row_number: u32,

    #[serde(flatten)]
    pub enriched_information: Option<ReportEntryEnrichedInfo>,

    #[serde(flatten)]
    pub rest: HashMap<String, serde_json::Value>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ReportEntryEnrichedInfo {
    pub project_id: Option<u64>,
    pub project_name: Option<String>,
    pub project_hex: Option<String>,

    pub tag_names: Vec<String>,
}

#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct ReportEntryTimeDetails {
    pub id: u64,
    pub seconds: u32,
    pub start: DateTime<Utc>,
    pub stop: DateTime<Utc>,
    #[serde(rename = "at")]
    pub updated_at: DateTime<Utc>,
}
impl ReportEntry {
    pub fn into_time_entry(self, workspace_id: u64) -> TimeEntry {
        TimeEntry {
            id: self.time_entries[0].id,
            workspace_id,
            user_id: self.user_id,
            project_id: self.project_id,
            task_id: self.task_id,
            start: self.time_entries[0].start,
            stop: Some(self.time_entries[0].stop),
            duration: Some(self.time_entries[0].seconds),
            updated_at: self.time_entries[0].updated_at,
            description: self.description,
            tags: self
                .enriched_information
                .map(|e| e.tag_names)
                .unwrap_or_default(),

            tag_ids: self.tag_ids,
            billable: self.billable,
            server_deleted_at: None,
            permissions: None,
        }
    }
}
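Note that into_time_entry indexes self.time_entries[0] throughout, so it panics if a report row ever arrives with an empty time_entries list. A guarded variant is cheap insurance; a sketch (an assumed addition, not in the diff):

impl ReportEntry {
    /// Hypothetical fallible variant: returns None instead of panicking when
    /// a report row carries no time entries.
    pub fn try_into_time_entry(self, workspace_id: u64) -> Option<TimeEntry> {
        if self.time_entries.is_empty() {
            return None;
        }
        Some(self.into_time_entry(workspace_id))
    }
}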
#[skip_serializing_none]
#[derive(Serialize, Deserialize, Clone, Default)]
pub struct TogglReportFilters {
    pub billable: Option<bool>,
    pub client_ids: Option<Vec<u64>>,
    pub description: Option<String>,
    pub end_date: Option<NaiveDate>,
    pub enrich_response: Option<bool>,
    pub first_id: Option<u64>,
    pub first_row_number: Option<u64>,
    pub first_timestamp: Option<u64>,
    pub group_ids: Option<Vec<u64>>,
    pub grouped: Option<bool>,
    pub hide_amounts: Option<bool>,
    pub max_duration_seconds: Option<u64>,
    pub min_duration_seconds: Option<u64>,
    pub order_by: Option<String>,
    pub order_dir: Option<String>,
    pub page_size: Option<u64>,
    #[serde(rename = "postedFields")]
    pub posted_fields: Option<Vec<String>>,
    pub project_ids: Option<Vec<u64>>,
    pub rounding: Option<u64>,
    pub rounding_minutes: Option<u64>,
    #[serde(rename = "startTime")]
    pub start_time: Option<String>,
    pub start_date: Option<NaiveDate>,
    pub tag_ids: Option<Vec<u64>>,
    pub task_ids: Option<Vec<u64>>,
    pub time_entry_ids: Option<Vec<u64>>,
    pub user_ids: Option<Vec<u64>>,

    #[serde(flatten)]
    pub rest: HashMap<String, serde_json::Value>,
}

impl fmt::Debug for TogglReportFilters {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut ds = f.debug_struct("TogglReportFilters");

        if let Some(billable) = &self.billable {
            ds.field("billable", billable);
        }
        if let Some(client_ids) = &self.client_ids {
            ds.field("client_ids", client_ids);
        }
        if let Some(description) = &self.description {
            ds.field("description", description);
        }
        if let Some(end_date) = &self.end_date {
            ds.field("end_date", end_date);
        }
        if let Some(enrich_response) = &self.enrich_response {
            ds.field("enrich_response", enrich_response);
        }
        if let Some(first_id) = &self.first_id {
            ds.field("first_id", first_id);
        }
        if let Some(first_row_number) = &self.first_row_number {
            ds.field("first_row_number", first_row_number);
        }
        if let Some(first_timestamp) = &self.first_timestamp {
            ds.field("first_timestamp", first_timestamp);
        }
        if let Some(group_ids) = &self.group_ids {
            ds.field("group_ids", group_ids);
        }
        if let Some(grouped) = &self.grouped {
            ds.field("grouped", grouped);
        }
        if let Some(hide_amounts) = &self.hide_amounts {
            ds.field("hide_amounts", hide_amounts);
        }
        if let Some(max_duration_seconds) = &self.max_duration_seconds {
            ds.field("max_duration_seconds", max_duration_seconds);
        }
        if let Some(min_duration_seconds) = &self.min_duration_seconds {
            ds.field("min_duration_seconds", min_duration_seconds);
        }
        if let Some(order_by) = &self.order_by {
            ds.field("order_by", order_by);
        }
        if let Some(order_dir) = &self.order_dir {
            ds.field("order_dir", order_dir);
        }
        if let Some(page_size) = &self.page_size {
            ds.field("page_size", page_size);
        }
        if let Some(posted_fields) = &self.posted_fields {
            ds.field("postedFields", posted_fields);
        }
        if let Some(project_ids) = &self.project_ids {
            ds.field("project_ids", project_ids);
        }
        if let Some(rounding) = &self.rounding {
            ds.field("rounding", rounding);
        }
        if let Some(rounding_minutes) = &self.rounding_minutes {
            ds.field("rounding_minutes", rounding_minutes);
        }
        if let Some(start_time) = &self.start_time {
            ds.field("startTime", start_time);
        }
        if let Some(start_date) = &self.start_date {
            ds.field("start_date", start_date);
        }
        if let Some(tag_ids) = &self.tag_ids {
            ds.field("tag_ids", tag_ids);
        }
        if let Some(task_ids) = &self.task_ids {
            ds.field("task_ids", task_ids);
        }
        if let Some(time_entry_ids) = &self.time_entry_ids {
            ds.field("time_entry_ids", time_entry_ids);
        }
        if let Some(user_ids) = &self.user_ids {
            ds.field("user_ids", user_ids);
        }

        if !self.rest.is_empty() {
            ds.field("rest", &self.rest);
        }

        ds.finish()
    }
}

#[derive(Serialize, Deserialize, Debug, Clone, Soars)]
pub struct Workspace {
    pub id: u64,
    pub organization_id: u64,
    pub name: String,
}
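Putting the filter type and its custom Debug together: only the fields a caller actually sets show up in logs or in the serialized request body. A usage sketch (the dates are arbitrary assumptions):

let filters = TogglReportFilters {
    start_date: NaiveDate::from_ymd_opt(2024, 1, 1),
    end_date: NaiveDate::from_ymd_opt(2024, 1, 31),
    billable: Some(true),
    ..Default::default()
};

// Unset fields are skipped both by the Debug impl above and, thanks to
// #[skip_serializing_none], by serialization.
println!("{filters:?}");
// TogglReportFilters { billable: true, end_date: 2024-01-31, start_date: 2024-01-01 }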
343
src/worker.rs
Normal file
@ -0,0 +1,343 @@
use crate::toggl::types::{Project, Tag, TimeEntry, TogglReportFilters, TrackingClient};
use crate::toggl::TogglApi;
use crate::{AppError, TableSummary};
use chrono::{DateTime, NaiveDate, TimeDelta, Utc};
use itertools::Itertools;
use soa_rs::Soa;
use sqlx::PgPool;
macro_rules! cast_slice {
    ($slice: expr, $typ: ty) => {
        &($slice.iter().map(|id| *id as $typ).collect_vec())[..]
    };
}

macro_rules! cast_slice_opts {
    ($slice: expr, $typ: ty) => {
        $slice
            .iter()
            .map(|opt| opt.map(|id| id as $typ))
            .collect_vec()
    };
}
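These two helpers exist because the Rust types use unsigned ids while the Postgres bigint/int columns are signed, so slices are cast element-wise at each bind site. Roughly what cast_slice!(ids, i64) does, written out by hand (a standalone sketch, not part of the diff):

use itertools::Itertools;

#[test]
fn cast_slice_by_hand() {
    let ids: [u64; 3] = [1, 2, 3];

    // cast_slice! builds this Vec as a temporary and borrows it; the borrow
    // only lives for the enclosing expression, which is why the macro is
    // invoked inline inside sqlx::query! rather than bound to a variable.
    let as_i64: Vec<i64> = ids.iter().map(|id| *id as i64).collect_vec();
    assert_eq!(&as_i64[..], &[1i64, 2, 3]);
}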
#[derive(Debug, Clone)]
pub struct Worker {
    pub(crate) db: PgPool,
    pub(crate) toggl_api: TogglApi,
}
impl Worker {
    async fn get_ids(&self) -> Result<TableSummary, AppError> {
        let client_ids = sqlx::query!("select id from tracking_clients")
            .fetch_all(&self.db)
            .await?;

        let workspace_ids = sqlx::query!("select id from workspaces")
            .fetch_all(&self.db)
            .await?;

        let project_ids = sqlx::query!("select id from projects")
            .fetch_all(&self.db)
            .await?;

        let tag_ids = sqlx::query!("select id from tags")
            .fetch_all(&self.db)
            .await?;

        Ok(TableSummary {
            client_ids: client_ids.iter().map(|row| row.id as u64).collect(),
            workspace_ids: workspace_ids.iter().map(|row| row.id as u64).collect(),
            project_ids: project_ids.iter().map(|row| row.id as u64).collect(),
            tag_ids: tag_ids.iter().map(|row| row.id as u64).collect(),
        })
    }
    pub async fn fetch_within(&self, start: NaiveDate, end: NaiveDate) -> Result<(), AppError> {
        let results = self
            .toggl_api
            .search(
                self.toggl_api.workspace_id,
                TogglReportFilters {
                    start_date: Some(start),
                    end_date: Some(end),
                    ..Default::default()
                },
            )
            .await?;

        let time_entries = results
            .into_iter()
            .map(|entry| entry.into_time_entry(self.toggl_api.workspace_id))
            .collect::<Vec<_>>();

        self.update_database(time_entries).await
    }
    pub async fn fetch_changed_since(&self, look_back: TimeDelta) -> Result<(), AppError> {
        if look_back > TimeDelta::days(90) {
            return Err(AppError::LookBackTooLarge);
        }

        self.update_time_entries(Utc::now() - look_back).await
    }
    pub async fn update(&self, default_look_back: TimeDelta) -> Result<(), AppError> {
        let result = sqlx::query!("select max(updated_at) as last_updated_at from time_entries")
            .fetch_optional(&self.db)
            .await
            .expect("Could not fetch max updated_at from time_entries");

        let fetch_since = result
            .and_then(|record| record.last_updated_at)
            .unwrap_or_else(|| Utc::now() - default_look_back);

        self.update_time_entries(fetch_since).await
    }
    async fn update_time_entries(&self, fetch_since: DateTime<Utc>) -> Result<(), AppError> {
        let time_entries = self
            .toggl_api
            .get_time_entries_for_user_modified_since(fetch_since)
            .await?;

        self.update_database(time_entries).await
    }
    async fn update_database(&self, time_entries: Vec<TimeEntry>) -> Result<(), AppError> {
        let existing_ids = self.get_ids().await?;

        let fetch_workspaces = time_entries
            .iter()
            .map(|entry| entry.workspace_id)
            .filter(|workspace_id| !existing_ids.workspace_ids.contains(workspace_id))
            .collect::<Vec<_>>();

        let fetch_projects = time_entries
            .iter()
            .filter_map(|entry| entry.project_id)
            .any(|project_id| !existing_ids.project_ids.contains(&project_id));

        let fetch_tags = time_entries
            .iter()
            .flat_map(|entry| entry.tag_ids.iter())
            .any(|tag| !existing_ids.tag_ids.contains(tag));

        if !fetch_workspaces.is_empty() {
            self.update_workspaces(&fetch_workspaces).await?;
        }

        if fetch_projects {
            self.update_projects(&existing_ids).await?;
        }

        if fetch_tags {
            self.update_tags().await?;
        }

        for time_entries_chunk in time_entries.chunks(100) {
            self.update_time_entries_chunk(time_entries_chunk).await?;
        }

        Ok(())
    }
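One small thing to watch in update_database: fetch_workspaces can contain the same missing workspace id once per time entry, so update_workspaces would fetch it repeatedly. Since itertools is already imported, the collection could be deduplicated in place; a fragment-level sketch (an assumed tweak, not part of the diff):

let fetch_workspaces = time_entries
    .iter()
    .map(|entry| entry.workspace_id)
    .filter(|workspace_id| !existing_ids.workspace_ids.contains(workspace_id))
    .unique() // dedupe so each missing workspace is fetched once
    .collect::<Vec<_>>();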
    async fn update_time_entries_chunk(&self, time_entries: &[TimeEntry]) -> Result<(), AppError> {
        let time_entries = Soa::from(time_entries);

        sqlx::query!(
            r#"
            INSERT INTO time_entries (id, workspace_id, user_id, project_id, task_id, start, stop, duration, updated_at, description, billable, server_deleted_at, permissions)
            SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::bigint[], $4::bigint[], $5::bigint[], $6::timestamptz[], $7::timestamptz[], $8::int[], $9::timestamptz[], $10::text[], $11::bool[], $12::timestamptz[], $13::text[])
            ON CONFLICT (id) DO UPDATE SET
                workspace_id = excluded.workspace_id,
                user_id = excluded.user_id,
                project_id = excluded.project_id,
                task_id = excluded.task_id,
                start = excluded.start,
                stop = excluded.stop,
                duration = excluded.duration,
                updated_at = excluded.updated_at,
                description = excluded.description,
                billable = excluded.billable,
                server_deleted_at = excluded.server_deleted_at,
                permissions = excluded.permissions
            "#,
            cast_slice!(time_entries.id(), i64),
            cast_slice!(time_entries.workspace_id(), i64),
            cast_slice!(time_entries.user_id(), i64),
            cast_slice_opts!(time_entries.project_id(), i64) as _,
            cast_slice_opts!(time_entries.task_id(), i64) as _,
            time_entries.start(),
            time_entries.stop() as _,
            cast_slice_opts!(time_entries.duration(), i32) as _,
            time_entries.updated_at(),
            time_entries.description() as _,
            time_entries.billable(),
            time_entries.server_deleted_at() as _,
            time_entries.permissions() as _,
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }
    async fn update_workspaces(&self, workspace_ids: &[u64]) -> Result<(), AppError> {
        let workspaces = workspace_ids
            .iter()
            .map(|id| self.toggl_api.get_workspace(*id));

        let workspaces = futures::future::join_all(workspaces)
            .await
            .into_iter()
            .collect::<Result<Vec<_>, _>>()?;

        let workspaces = Soa::from(workspaces.as_slice());

        sqlx::query!(
            r#"
            INSERT INTO workspaces (id, organization_id, name)
            SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::text[])
            ON CONFLICT (id) DO UPDATE SET
                organization_id = excluded.organization_id,
                name = excluded.name
            "#,
            cast_slice!(workspaces.id(), i64),
            cast_slice!(workspaces.organization_id(), i64),
            workspaces.name(),
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }
    async fn update_projects(&self, existing_ids: &TableSummary) -> Result<(), AppError> {
        let projects = self.toggl_api.get_projects().await?;

        let fetch_clients = projects
            .iter()
            .filter_map(|project| project.client_id)
            .any(|client_id| !existing_ids.client_ids.contains(&(client_id as u64)));

        if fetch_clients {
            self.update_clients().await?;
        }

        let projects: Soa<Project> = Soa::from(projects.as_slice());

        sqlx::query!(
            r#"
            INSERT INTO projects (id, workspace_id, client_id, name, color, status, active, updated_at, start_date, created_at, server_deleted_at, actual_hours, actual_seconds, can_track_time, permissions)
            SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::bigint[], $4::text[], $5::text[], $6::text[], $7::bool[], $8::timestamptz[], $9::date[], $10::timestamptz[], $11::timestamptz[], $12::int[], $13::int[], $14::bool[], $15::text[])
            ON CONFLICT (id) DO UPDATE SET
                workspace_id = excluded.workspace_id,
                client_id = excluded.client_id,
                name = excluded.name,
                color = excluded.color,
                status = excluded.status,
                active = excluded.active,
                updated_at = excluded.updated_at,
                start_date = excluded.start_date,
                created_at = excluded.created_at,
                server_deleted_at = excluded.server_deleted_at,
                actual_hours = excluded.actual_hours,
                actual_seconds = excluded.actual_seconds,
                can_track_time = excluded.can_track_time,
                permissions = excluded.permissions
            "#,
            cast_slice!(projects.id(), i64),
            cast_slice!(projects.workspace_id(), i64),
            cast_slice_opts!(projects.client_id(), i64) as _,
            projects.name(),
            projects.color(),
            &projects.status().iter().map(|s| s.to_string()).collect::<Vec<_>>()[..],
            projects.active(),
            projects.updated_at(),
            projects.start_date() as _,
            projects.created_at(),
            projects.server_deleted_at() as _,
            cast_slice_opts!(projects.actual_hours(), i64) as _,
            cast_slice_opts!(projects.actual_seconds(), i64) as _,
            projects.can_track_time(),
            projects.permissions() as _,
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }
    pub(crate) async fn update_tags(&self) -> Result<(), AppError> {
        let tags = self.toggl_api.get_tags().await?;
        let tags: Soa<Tag> = Soa::from(tags.as_slice());

        sqlx::query!(
            r#"
            INSERT INTO tags (id, name, workspace_id, creator_id, updated_at, deleted_at, permissions)
            SELECT * FROM UNNEST($1::bigint[], $2::text[], $3::bigint[], $4::bigint[], $5::timestamptz[], $6::timestamptz[], $7::text[])
            ON CONFLICT (id) DO UPDATE SET
                name = excluded.name,
                workspace_id = excluded.workspace_id,
                creator_id = excluded.creator_id,
                updated_at = excluded.updated_at,
                deleted_at = excluded.deleted_at,
                permissions = excluded.permissions
            "#,
            cast_slice!(tags.id(), i64),
            tags.name(),
            cast_slice!(tags.workspace_id(), i64),
            cast_slice!(tags.creator_id(), i64),
            tags.updated_at(),
            // Nullable fields fail to type check with UNNEST batch inserts so we silence the
            // errors using `as _`.
            tags.deleted_at() as _,
            tags.permissions() as _,
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }
    async fn update_clients(&self) -> Result<(), AppError> {
        let clients = self.toggl_api.get_clients().await?;

        let clients: Soa<TrackingClient> = Soa::from(clients.as_slice());

        sqlx::query!(
            r#"
            INSERT INTO tracking_clients
                (id, updated_at, archived, creator_id, integration_provider, notes, name, server_deleted_at, workspace_id, permissions)
            SELECT * FROM UNNEST($1::bigint[], $2::timestamptz[], $3::bool[], $4::bigint[], $5::text[], $6::text[], $7::text[], $8::timestamptz[], $9::bigint[], $10::text[])
            ON CONFLICT (id) DO UPDATE SET
                updated_at = excluded.updated_at,
                archived = excluded.archived,
                creator_id = excluded.creator_id,
                integration_provider = excluded.integration_provider,
                notes = excluded.notes,
                name = excluded.name,
                server_deleted_at = excluded.server_deleted_at,
                workspace_id = excluded.workspace_id,
                permissions = excluded.permissions
            "#,
            cast_slice!(clients.id(), i64),
            clients.updated_at(),
            clients.archived(),
            cast_slice!(clients.creator_id(), i64),
            // These two are Option<String>, so `as _` silences the nullable-array
            // type check, as in update_tags above.
            clients.integration_provider() as _,
            clients.notes() as _,
            clients.name(),
            clients.server_deleted_at() as _,
            cast_slice!(clients.workspace_id(), i64),
            clients.permissions() as _
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }
}
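Finally, a sketch of how a caller might drive the worker on a schedule. The five-minute interval and the 30-day fallback look-back are assumptions, not part of this diff:

// Hypothetical driver loop: checkpointed incremental sync every five minutes,
// falling back to a 30-day window when the time_entries table is empty.
async fn run_sync(worker: Worker) -> Result<(), AppError> {
    let mut ticker = tokio::time::interval(std::time::Duration::from_secs(300));
    loop {
        ticker.tick().await;
        worker.update(TimeDelta::days(30)).await?;
    }
}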