Compare commits

...

23 Commits

SHA1 Message Date
5f2120ea18 Fix build label 2024-03-03 20:54:00 +00:00
b7102e12a6 Run rustfmt and fix some clippy lints 2024-03-03 20:53:41 +00:00
479389dde6 Report update statistics 2024-03-03 20:49:27 +00:00
7759632848 Migrate database updates to use TimeEntrys over ReportRows 2024-03-03 20:35:53 +00:00
62657f2bdd Refactor poll to use the time_entry based approach from now on 2024-03-03 19:53:44 +00:00
889859dbae Run rustfmt and move cache_report into sync_service.rs 2024-03-03 19:53:20 +00:00
73b3e2cb96 Update Datetime handling in API and DB layers
Changed the handling of the start, stop, and update timestamp fields in the ReportRowInnerTimeEntry data structure from strings to DateTime objects in the API layer. Also updated the data persistence in the DB layer to use the new datetime objects and introduced additional columns for server_updated_at, server_deleted_at, and tags. (A sketch of this change follows the commit list.)
2024-03-03 18:21:00 +00:00
ce60eaf310 Add time_entry columns 2024-03-03 18:15:40 +00:00
573d7f8635 Update columns on conflict 2024-03-02 17:13:22 +00:00
b7da62536d Improve time types 2024-03-02 17:12:46 +00:00
0f3c031b7a Link up db types 2024-03-02 16:59:04 +00:00
84b4c747f5 Add project fields 2024-03-02 16:54:15 +00:00
b722b21a75 Add time entries handling functions in Toggl API client 2024-03-02 10:21:33 +00:00
90457155aa Refactor Toggl API types
Significant renaming and data type changes were made to the Toggl API types to improve identification and consistency. For example, 'Current' is renamed to 'TimeEntry', 'ProjectClient' to 'Client', and 'TogglQuery' to 'TogglReportQuery'. The changes affect multiple parts of the codebase, including the routes and DB access.
2024-03-02 10:10:30 +00:00
9cfe56b3ce Refactor ReportEntry to ReportRow and enhance request builder
Renamed 'ReportEntry' to 'ReportRow' for clarity. Also simplified the request builder in the TogglApiClient by implementing a 'make_request' function that handles rate limiting; the function is then used in the various methods of the TogglApiClient. (A condensed sketch follows the commit list.)
2024-03-02 10:00:15 +00:00
991e7fd746 Make the auth HeaderValue sensitive 2024-03-02 09:24:54 +00:00
b9b211723e Fix a bunch of warnings 2024-03-02 09:22:47 +00:00
43399c28b1 Separate out routes 2024-03-02 09:18:37 +00:00
ac049beff7 Stash changes 2024-03-02 09:14:27 +00:00
f08397ab15 Fix build issue due to csv parsing 2024-03-01 21:23:51 +00:00
ee21753411 Centralise files related to talking to the Toggl apis to the toggl_api module 2024-03-01 21:21:33 +00:00
1df76ea573 Rename TogglClient to TogglApiClient 2024-03-01 21:18:16 +00:00
82d9cf5c87 Add CSV column mapping and reconstruction of basic information 2024-03-01 21:11:01 +00:00
28 changed files with 3771 additions and 507 deletions
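As an aside, here is a minimal sketch of the datetime change described in commit 73b3e2cb96 above. The field set is taken from the ReportRowInnerTimeEntry definition in src/toggl_api/types.rs further down; it assumes chrono is built with its `serde` feature so the RFC 3339 strings deserialize directly.

```rust
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};

// Before: timestamps arrived as plain strings and had to be parsed at
// each use site, e.g. chrono::DateTime::parse_from_rfc3339(&inner.start).
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct InnerTimeEntryBefore {
    pub id: u64,
    pub seconds: u32,
    pub start: String,
    pub stop: String,
    pub at: String,
}

// After: serde parses the RFC 3339 strings into DateTime<Utc> once, at
// the API boundary, so the DB layer can persist the values directly.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct InnerTimeEntryAfter {
    pub id: u64,
    pub seconds: u32,
    pub start: DateTime<Utc>,
    pub stop: DateTime<Utc>,
    pub at: DateTime<Utc>,
}
```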
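Likewise, a condensed sketch of the `make_request` retry loop from commit 9cfe56b3ce; the full version appears in src/toggl_api/api_client.rs below.

```rust
use anyhow::anyhow;
use reqwest::{Client, RequestBuilder, Response, StatusCode};
use std::time::Duration;

// Clone the builder so the request can be re-sent, back off briefly on
// HTTP 429, and otherwise hand the response back to the caller.
async fn make_request(client: &Client, rb: RequestBuilder) -> anyhow::Result<Response> {
    loop {
        let builder = rb
            .try_clone()
            .ok_or(anyhow!("Failed to clone request builder"))?;
        let response = client.execute(builder.build()?).await?;
        if response.status() == StatusCode::TOO_MANY_REQUESTS {
            tokio::time::sleep(Duration::from_secs(5)).await;
        } else {
            return Ok(response);
        }
    }
}
```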

.env (3 changed lines)

@@ -1 +1,4 @@
DATABASE_URL=postgres://postgres@localhost:5432/toggl_portal
TOGGL_API_TOKEN=237918c4e008f5aeefe886c9112ab560
TOGGL_WORKSPACE_ID=2837131
WORKSPACE_ID=${TOGGL_WORKSPACE_ID}


@@ -30,7 +30,7 @@ jobs:
with:
context: .
push: true
tags: git.joshuacoles.me/personal/beachhead-services/toggl-portal:arm
tags: git.joshuacoles.me/personal/toggl-portal:arm
build-args: |
APP_NAME=toggl-portal
PACKAGE_NAME=toggl-portal

.gitignore vendored (1 changed line)

@@ -1,3 +1,4 @@
/target
/migration/target
/.idea
/ignore

Cargo.lock generated (1 changed line)

@@ -1630,6 +1630,7 @@ version = "0.1.0"
dependencies = [
"async-std",
"sea-orm-migration",
"serde_json",
]
[[package]]

GRAFTING.md Normal file (10 changed lines)

@@ -0,0 +1,10 @@
- Possible sources
- report
- me/time_entries
- This will need to filter by `workspace_id` if we don't want to deal with multiple workspaces.
- csv
- This can give us a complete picture of the time entries, but it lacks an `id` field, so it cannot be easily updated
- The first two contain `at` which is when the time entry was last updated, useful for grafting
- `me/time_entries` can get anything **updated** since a given time
- This is incredibly useful for updating the time entries (see the sketch after this file)
- Most historic time entries are never changed, so a CSV of, say, 2023 is probably always going to be valid
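Below is a sketch of the incremental update these notes motivate. It assumes the `since` parameter of `GET /api/v9/me/time_entries` takes a Unix timestamp (as the client code later in this diff assumes) and that the newest cached `server_updated_at` is the resume point; `fetch_changed_since` is a hypothetical helper, and authentication headers are omitted.

```rust
use chrono::{DateTime, Duration, Utc};
use serde_json::Value;

// Hypothetical helper: fetch only the entries whose `at` changed after
// the newest update we have cached. Entries with `server_deleted_at`
// set are tombstones, so the caller can upsert and delete from the
// same response. (Auth headers omitted for brevity.)
async fn fetch_changed_since(
    http: &reqwest::Client,
    newest_cached_at: Option<DateTime<Utc>>,
) -> anyhow::Result<Vec<Value>> {
    // With an empty cache, fall back to "one day ago" rather than
    // refetching all history.
    let since = newest_cached_at.unwrap_or_else(|| Utc::now() - Duration::days(1));
    let entries = http
        .get("https://api.track.toggl.com/api/v9/me/time_entries")
        .query(&[("since", since.timestamp())])
        .send()
        .await?
        .json::<Vec<Value>>()
        .await?;
    Ok(entries)
}
```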

README.md Normal file (5 changed lines)

@@ -0,0 +1,5 @@
# Toggl Portal
## Purpose
- Act as an authenticated client that other apps can use to access Toggl data, in addition to caching this data
for other data-analysis purposes.

migration/Cargo.lock generated Normal file (2811 changed lines)

File diff suppressed because it is too large.


@@ -10,10 +10,12 @@ path = "src/lib.rs"
[dependencies]
async-std = { version = "1", features = ["attributes", "tokio1"] }
serde_json = "^1.0"
[dependencies.sea-orm-migration]
version = "0.12.0"
features = [
"runtime-tokio-rustls", # `ASYNC_RUNTIME` feature
"sqlx-postgres", # `DATABASE_DRIVER` feature
'with-json'
]


@@ -4,6 +4,8 @@ mod m20231101_172500_create_time_entry_table;
mod m20231106_134950_create_clients;
mod m20231106_195401_create_projects;
mod m20231106_201029_add_time_entry_project_fk;
mod m20240302_102418_update_project_table;
mod m20240302_171651_update_time_entry_table;
pub struct Migrator;
@@ -15,6 +17,8 @@ impl MigratorTrait for Migrator {
Box::new(m20231106_134950_create_clients::Migration),
Box::new(m20231106_195401_create_projects::Migration),
Box::new(m20231106_201029_add_time_entry_project_fk::Migration),
Box::new(m20240302_102418_update_project_table::Migration),
Box::new(m20240302_171651_update_time_entry_table::Migration),
]
}


@@ -19,11 +19,23 @@ impl MigrationTrait for Migration {
.primary_key(),
)
.col(
ColumnDef::new(TimeEntry::TogglId).big_unsigned().not_null().unique_key())
ColumnDef::new(TimeEntry::TogglId)
.big_unsigned()
.not_null()
.unique_key(),
)
.col(ColumnDef::new(TimeEntry::Description).string().not_null())
.col(ColumnDef::new(TimeEntry::ProjectId).big_unsigned())
.col(ColumnDef::new(TimeEntry::Start).timestamp_with_time_zone().not_null())
.col(ColumnDef::new(TimeEntry::Stop).timestamp_with_time_zone().not_null())
.col(
ColumnDef::new(TimeEntry::Start)
.timestamp_with_time_zone()
.not_null(),
)
.col(
ColumnDef::new(TimeEntry::Stop)
.timestamp_with_time_zone()
.not_null(),
)
.col(ColumnDef::new(TimeEntry::RawJson).json_binary().not_null())
.to_owned(),
)
@@ -46,5 +58,5 @@ enum TimeEntry {
ProjectId,
Start,
Stop,
RawJson
RawJson,
}


@@ -20,7 +20,11 @@ impl MigrationTrait for Migration {
.col(ColumnDef::new(Client::Name).string().not_null())
.col(ColumnDef::new(Client::Archived).boolean().not_null())
.col(ColumnDef::new(Client::WorkspaceId).integer().not_null())
.col(ColumnDef::new(Client::At).timestamp_with_time_zone().not_null())
.col(
ColumnDef::new(Client::At)
.timestamp_with_time_zone()
.not_null(),
)
.col(ColumnDef::new(Client::ServerDeletedAt).timestamp_with_time_zone())
.to_owned(),
)
@@ -42,5 +46,5 @@ enum Client {
Archived,
WorkspaceId,
At,
ServerDeletedAt
ServerDeletedAt,
}


@@ -11,8 +11,19 @@ impl MigrationTrait for Migration {
Table::create()
.table(Project::Table)
.if_not_exists()
.col(ColumnDef::new(Project::Id).integer().primary_key().auto_increment().not_null())
.col(ColumnDef::new(Project::TogglId).big_unsigned().not_null().unique_key())
.col(
ColumnDef::new(Project::Id)
.integer()
.primary_key()
.auto_increment()
.not_null(),
)
.col(
ColumnDef::new(Project::TogglId)
.big_unsigned()
.not_null()
.unique_key(),
)
.col(
ColumnDef::new(Project::WorkspaceId)
.big_unsigned()
@@ -27,13 +38,15 @@ impl MigrationTrait for Migration {
.await?;
// Create foreign key
manager.create_foreign_key(
ForeignKey::create()
.name("project_client_id")
.from(Project::Table, Project::ClientId)
.to(Client::Table, Client::Id)
.to_owned(),
).await?;
manager
.create_foreign_key(
ForeignKey::create()
.name("project_client_id")
.from(Project::Table, Project::ClientId)
.to(Client::Table, Client::Id)
.to_owned(),
)
.await?;
Ok(())
}


@@ -0,0 +1,69 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.add_column(ColumnDef::new(Project::Color).text())
.add_column(ColumnDef::new(Project::ServerCreatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerUpdatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerDeletedAt).timestamp_with_time_zone())
.to_owned(),
)
.await?;
manager
.get_connection()
.execute_unprepared(
r#"
update "project"
set "color" = raw_json ->> 'color',
"server_created_at" = (raw_json ->> 'created_at') :: timestamptz,
"server_updated_at" = (raw_json ->> 'at') :: timestamptz,
"server_deleted_at" = (raw_json ->> 'server_deleted_at') :: timestamptz
"#,
)
.await?;
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.modify_column(ColumnDef::new(Project::Color).not_null())
.modify_column(ColumnDef::new(Project::ServerCreatedAt).not_null())
.modify_column(ColumnDef::new(Project::ServerUpdatedAt).not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.drop_column(Project::Color)
.drop_column(Project::ServerCreatedAt)
.drop_column(Project::ServerUpdatedAt)
.drop_column(Project::ServerDeletedAt)
.to_owned(),
)
.await
}
}
#[derive(DeriveIden)]
enum Project {
Table,
Color,
ServerCreatedAt,
ServerUpdatedAt,
ServerDeletedAt,
}


@@ -0,0 +1,70 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.add_column(
ColumnDef::new(TimeEntry::Tags)
.json_binary()
.default(serde_json::json!([]))
.not_null(),
)
.add_column(
ColumnDef::new(TimeEntry::ServerUpdatedAt).timestamp_with_time_zone(),
)
.add_column(
ColumnDef::new(TimeEntry::ServerDeletedAt).timestamp_with_time_zone(),
)
.to_owned(),
)
.await?;
manager
.get_connection()
.execute_unprepared(
r#"
update "time_entry"
set "tags" = coalesce(raw_json -> 'tags', '[]' :: jsonb),
"server_updated_at" = (raw_json ->> 'at') :: timestamptz;
"#,
)
.await?;
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.modify_column(ColumnDef::new(TimeEntry::ServerUpdatedAt).not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.drop_column(TimeEntry::Tags)
.drop_column(TimeEntry::ServerDeletedAt)
.drop_column(TimeEntry::ServerUpdatedAt)
.to_owned(),
)
.await
}
}
#[derive(DeriveIden)]
enum TimeEntry {
Table,
ServerUpdatedAt,
ServerDeletedAt,
Tags,
}


@@ -1,178 +0,0 @@
use reqwest::Client;
use serde_json::Value;
use std::collections::HashMap;
use std::time::Duration;
use hyper::HeaderMap;
use tracing::instrument;
use tracing::log::debug;
use crate::types::{Current, Project, ProjectClient, ReportEntry, TogglQuery};
#[derive(Debug, Clone)]
pub struct TogglClient {
client: Client,
workspace_id: String,
base_url: String,
reports_base_url: String,
headers: HeaderMap,
}
impl TogglClient {
pub async fn check_health(&self) -> bool {
true
}
pub fn new(workspace_id: &str, toggl_auth: &str) -> Self {
let client = Client::builder()
.default_headers(Self::default_headers(toggl_auth))
.build()
.expect("Failed to build reqwest client");
Self {
client,
workspace_id: workspace_id.to_string(),
base_url: "https://api.track.toggl.com/api/v9".to_string(),
reports_base_url: "https://api.track.toggl.com/reports/api/v3".to_string(),
headers: Self::default_headers(toggl_auth),
}
}
fn default_headers(toggl_auth: &str) -> reqwest::header::HeaderMap {
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
"Authorization",
reqwest::header::HeaderValue::from_str(&format!("Basic {}", toggl_auth)).unwrap(),
);
headers
}
pub async fn fetch_projects(&self) -> Result<Vec<Project>, reqwest::Error> {
let url = format!(
"{base_url}/workspaces/{}/projects",
self.workspace_id,
base_url = self.base_url,
);
let res = self
.client
.get(&url)
.headers(self.headers.clone())
.send()
.await?
.json::<Vec<Project>>()
.await
.unwrap();
Ok(res)
}
pub async fn fetch_clients(&self) -> Result<Vec<ProjectClient>, reqwest::Error> {
let url = format!(
"{base_url}/workspaces/{}/clients",
self.workspace_id,
base_url = self.base_url,
);
let res = self
.client
.get(&url)
.headers(self.headers.clone())
.send()
.await?
.json::<Vec<ProjectClient>>()
.await
.unwrap();
Ok(res)
}
pub async fn get_current(&self) -> Result<Option<Current>, reqwest::Error> {
let url = format!(
"{base_url}/me/time_entries/current",
base_url = self.base_url
);
let res = self
.client
.get(url)
.send()
.await?
.json::<Option<Current>>()
.await
.unwrap();
Ok(res)
}
fn create_filters(original_filters: &TogglQuery, last_row_id: u64) -> TogglQuery {
let mut filters: TogglQuery = original_filters.clone();
filters.first_row_number = Some(last_row_id + 1);
filters
}
#[instrument(skip(self, filters))]
pub async fn full_report(
&self,
filters: &TogglQuery,
) -> anyhow::Result<Vec<ReportEntry>> {
let url = format!(
"{base_url}/workspace/{workspace_id}/search/time_entries",
base_url = self.reports_base_url,
workspace_id = self.workspace_id
);
let mut last_row_number = Some(0);
let mut results = vec![];
while let Some(last_row_number_n) = last_row_number {
debug!("Fetching page starting with {}", last_row_number_n);
// If we are not on the first page, wait a bit to avoid rate limiting
if last_row_number_n != 0 {
tokio::time::sleep(Duration::from_millis(1000)).await;
}
// TODO: Implement rate limiting
let response = self
.client
.post(&url)
.headers(self.headers.clone())
.json(&Self::create_filters(&filters, last_row_number_n))
.send()
.await?;
let data = response
.json::<Vec<ReportEntry>>()
.await?;
last_row_number = data.last().map(|e| e.row_number as u64);
data.into_iter().for_each(|e| results.push(e));
}
Ok(results)
}
pub async fn start_time_entry(&self, mut body: HashMap<String, Value>) -> anyhow::Result<()> {
let url = format!(
"{base_url}/workspaces/{workspace_id}/time_entries",
base_url = self.base_url,
workspace_id = self.workspace_id
);
body.insert(
"workspace_id".to_string(),
self.workspace_id.parse::<i32>().unwrap().into(),
);
dbg!(self.client
.post(url)
.headers(self.headers.clone())
.json(&body)
.send()
.await?
.text()
.await?);
Ok(())
}
}

src/csv_parser.rs Normal file (69 changed lines)

@@ -0,0 +1,69 @@
use crate::utils::Result;
use anyhow::anyhow;
use chrono::{NaiveDate, NaiveTime};
use csv::StringRecord;
mod headings {
pub const USER: usize = 1;
pub const USER_EMAIL: usize = 2;
pub const CLIENT_NAME: usize = 3;
pub const PROJECT_NAME: usize = 4;
pub const TASK_NAME: usize = 5;
pub const DESCRIPTION: usize = 6;
pub const BILLABLE: usize = 7;
pub const START_DATE: usize = 8;
pub const END_DATE: usize = 9;
pub const START_TIME: usize = 10;
pub const END_TIME: usize = 11;
pub const DURATION: usize = 12;
pub const TAGS: usize = 13;
}
fn parse_csv_row(row: StringRecord) -> Result<crate::entity::time_entry::Model> {
let start_date = row
.get(headings::START_DATE)
.ok_or(anyhow!("Missing start date in CSV"))?;
let start_time = row
.get(headings::START_TIME)
.ok_or(anyhow!("Missing start time in CSV"))?;
let end_date = row
.get(headings::END_DATE)
.ok_or(anyhow!("Missing end date in CSV"))?;
let end_time = row
.get(headings::END_TIME)
.ok_or(anyhow!("Missing end time in CSV"))?;
let start_time = NaiveTime::parse_from_str(start_time, "%H:%M:%S")?;
let end_time = NaiveTime::parse_from_str(end_time, "%H:%M:%S")?;
let start_date = NaiveDate::parse_from_str(start_date, "%Y-%m-%d")?;
let end_date = NaiveDate::parse_from_str(end_date, "%Y-%m-%d")?;
let start = start_date.and_time(start_time);
let end = end_date.and_time(end_time);
let description = row
.get(headings::DESCRIPTION)
.ok_or(anyhow!("Missing description in CSV"))?;
let project_name = row
.get(headings::PROJECT_NAME)
.ok_or(anyhow!("Missing project name in CSV"))?;
let client_name = row
.get(headings::CLIENT_NAME)
.ok_or(anyhow!("Missing client name in CSV"))?;
let tags = row
.get(headings::TAGS)
.ok_or(anyhow!("Missing tags in CSV"))?;
let task_name = row
.get(headings::TASK_NAME)
.ok_or(anyhow!("Missing task name in CSV"))?;
let billable = match row
.get(headings::BILLABLE)
.ok_or(anyhow!("Missing billable in CSV"))?
{
"Yes" => true,
"No" => false,
_ => unimplemented!("Unknown billable value"),
};
unimplemented!("Refactor model to support non-json sources")
}


@@ -1,23 +1,47 @@
use crate::entity::{client, project, time_entry};
use crate::types::{Project, ProjectClient, ReportEntry, TimeEntry};
use crate::toggl_api::types::{Client, Project, ReportRow, TimeEntry};
use sea_orm::sea_query::OnConflict;
use sea_orm::{NotSet, Set};
impl ReportEntry {
pub(crate) fn as_models(&self) -> Vec<time_entry::ActiveModel> {
impl ReportRow {
pub fn to_time_entries(&self, workspace_id: i64) -> Vec<TimeEntry> {
self.time_entries
.iter()
.map(|inner| time_entry::ActiveModel {
id: NotSet,
toggl_id: Set(inner.id as i64),
description: Set(self.description.clone()),
project_id: Set(self.project_id.map(|id| id as i64)),
start: Set(chrono::DateTime::parse_from_rfc3339(&inner.start).unwrap()),
stop: Set(chrono::DateTime::parse_from_rfc3339(&inner.stop).unwrap()),
raw_json: Set(serde_json::to_value(inner).unwrap()),
.map(|inner| TimeEntry {
id: inner.id as i64,
description: self.description.clone(),
project_id: self.project_id.map(|id| id as i64),
task_id: self.task_id.map(|id| id as i64),
billable: self.billable,
start: inner.start,
stop: Some(inner.stop),
at: inner.at,
server_deleted_at: None,
tags: vec![], // TODO: tags on report row import, need to track in separate table
workspace_id,
duration: inner.seconds as i64,
tag_ids: self.tag_ids.iter().map(|ids| *ids as i64).collect(),
user_id: self.user_id as i64,
})
.collect()
}
}
impl TimeEntry {
pub(crate) fn as_model(&self) -> time_entry::ActiveModel {
time_entry::ActiveModel {
id: NotSet,
toggl_id: Set(self.id),
description: Set(self.description.clone()),
project_id: Set(self.project_id),
start: Set(self.start.fixed_offset()),
stop: Set(self.stop.unwrap().fixed_offset()),
raw_json: Set(serde_json::to_value(self).unwrap()),
server_updated_at: Set(self.at.fixed_offset()),
server_deleted_at: Set(self.server_deleted_at.map(|dt| dt.fixed_offset())),
tags: Set(serde_json::to_value(&self.tags).unwrap()),
}
}
pub fn grafting_conflict_statement() -> OnConflict {
OnConflict::column(time_entry::Column::TogglId)
@@ -27,20 +51,23 @@ impl ReportEntry {
time_entry::Column::Start,
time_entry::Column::Stop,
time_entry::Column::RawJson,
time_entry::Column::ServerUpdatedAt,
time_entry::Column::ServerDeletedAt,
time_entry::Column::Tags,
])
.to_owned()
}
}
impl ProjectClient {
impl Client {
pub fn as_model(&self) -> client::ActiveModel {
client::ActiveModel {
id: Set(self.id),
name: Set(self.name.clone()),
archived: Set(self.archived.clone()),
archived: Set(self.archived),
workspace_id: Set(self.wid),
at: Set(self.at.clone().fixed_offset()),
server_deleted_at: Set(self.server_deleted_at.clone().map(|dt| dt.fixed_offset())),
server_deleted_at: Set(self.server_deleted_at.map(|dt| dt.fixed_offset())),
}
}
@@ -67,6 +94,10 @@ impl Project {
client_id: Set(self.client_id.map(|id| id as i32)),
workspace_id: Set(self.workspace_id as i64),
raw_json: Set(serde_json::to_value(self).unwrap()),
color: Set(self.color.clone()),
server_created_at: Set(self.created_at.clone().fixed_offset()),
server_updated_at: Set(self.at.clone().fixed_offset()),
server_deleted_at: Set(self.server_deleted_at.map(|dt| dt.fixed_offset())),
}
}
@@ -78,6 +109,10 @@ impl Project {
project::Column::ClientId,
project::Column::WorkspaceId,
project::Column::RawJson,
project::Column::Color,
project::Column::ServerCreatedAt,
project::Column::ServerUpdatedAt,
project::Column::ServerDeletedAt,
])
.to_owned()
}


@@ -1,4 +1,5 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.2
#![allow(unused_imports)]
pub use super::client::Entity as Client;
pub use super::project::Entity as Project;


@@ -16,6 +16,11 @@ pub struct Model {
pub active: bool,
#[sea_orm(column_type = "JsonBinary")]
pub raw_json: Json,
#[sea_orm(column_type = "Text")]
pub color: String,
pub server_created_at: DateTimeWithTimeZone,
pub server_updated_at: DateTimeWithTimeZone,
pub server_deleted_at: Option<DateTimeWithTimeZone>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]


@@ -16,6 +16,10 @@ pub struct Model {
pub stop: DateTimeWithTimeZone,
#[sea_orm(column_type = "JsonBinary")]
pub raw_json: Json,
#[sea_orm(column_type = "JsonBinary")]
pub tags: Json,
pub server_updated_at: DateTimeWithTimeZone,
pub server_deleted_at: Option<DateTimeWithTimeZone>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]


@@ -1,35 +1,19 @@
use crate::client::TogglClient;
use crate::entity::prelude::TimeEntry;
use crate::entity::time_entry;
use crate::entity::time_entry::ActiveModel;
use crate::types::{Current, Project, ProjectClient, ReportEntry, TogglQuery};
use anyhow::anyhow;
use axum::extract::multipart::Field;
use axum::extract::{Multipart, Query};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use crate::toggl_api::TogglApiClient;
use axum::routing::{get, post};
use axum::{Extension, Json, Router};
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use utils::{shutdown_signal, Result};
use chrono::{NaiveDate, NaiveTime};
use axum::{Extension, Router};
use clap::Parser;
use migration::{Migrator, MigratorTrait};
use sea_orm::sea_query::IntoCondition;
use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter};
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
use std::net::SocketAddr;
use tower_http::trace::TraceLayer;
use tracing::{debug, instrument};
use utils::{shutdown_signal, Result};
mod client;
mod csv_parser;
mod db;
mod entity;
mod poll;
mod types;
mod routes;
mod sync_service;
mod toggl_api;
mod utils;
#[derive(Debug, Clone, Parser)]
@@ -51,186 +35,14 @@ struct Config {
poll_period: u64,
}
#[instrument(skip(db, toggl_client))]
pub async fn report(
Extension(toggl_client): Extension<TogglClient>,
Extension(db): Extension<DatabaseConnection>,
Json(query): Json<TogglQuery>,
) -> Result<Json<Vec<ReportEntry>>> {
let report = toggl_client.full_report(&query).await?;
debug!("Returned results: {:?}", report);
// We don't perform any deletes on report-fetched entries
cache_report(&db, &report, None).await?;
Ok(Json(report))
}
#[instrument(skip_all)]
async fn cache_report(
db: &DatabaseConnection,
models: &Vec<ReportEntry>,
exclusive_on: Option<Condition>,
) -> Result<()> {
let models = models.iter().flat_map(|entry| entry.as_models());
let models = models.collect::<Vec<_>>();
let ids = models
.iter()
.map(|entry| entry.toggl_id.clone().unwrap())
.collect::<Vec<_>>();
debug!("Caching report entries: {:?}", models);
// TODO: Why is this needed?
if models.is_empty() {
return Ok(());
}
TimeEntry::insert_many(models)
.on_conflict(ReportEntry::grafting_conflict_statement())
.exec(db)
.await?;
if let Some(exclusive_on) = exclusive_on {
TimeEntry::delete_many()
.filter(
Condition::all()
.add(exclusive_on)
.add(time_entry::Column::TogglId.is_in(ids).not()),
)
.exec(db)
.await?;
}
Ok(())
}
#[instrument(skip(toggl_client))]
pub async fn current(
Extension(toggl_client): Extension<TogglClient>,
) -> Result<Json<Option<Current>>> {
Ok(toggl_client.get_current().await.map(Json)?)
}
#[instrument(skip(toggl_client))]
pub async fn start_time_entry(
Extension(toggl_client): Extension<TogglClient>,
Json(body): Json<HashMap<String, Value>>,
) -> Result<impl IntoResponse> {
toggl_client.start_time_entry(body).await?;
Ok((StatusCode::OK, "Ok"))
}
#[instrument(skip(db, toggl_client))]
async fn projects(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglClient>,
) -> Result<Json<Vec<Project>>> {
let projects = toggl_client.fetch_projects().await?;
entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(projects))
}
#[instrument(skip(toggl_client, db))]
async fn clients(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglClient>,
) -> Result<Json<Vec<ProjectClient>>> {
let clients = toggl_client.fetch_clients().await?;
entity::client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
.on_conflict(ProjectClient::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(clients))
}
async fn health(Extension(toggl_client): Extension<TogglClient>) -> Result<&'static str> {
return if toggl_client.check_health().await {
Ok("Ok")
} else {
Err(anyhow!("Panopto health check failed").into())
};
}
#[derive(Debug, Clone, Deserialize)]
struct RefreshQuery {
start_date: Option<String>,
}
#[instrument(skip(toggl_client, db))]
async fn refresh(
Extension(toggl_client): Extension<TogglClient>,
Extension(db): Extension<DatabaseConnection>,
Query(RefreshQuery { start_date }): Query<RefreshQuery>,
) -> Result<&'static str> {
let end_date = chrono::Utc::now();
let end_date_query_string = end_date.date_naive().format("%Y-%m-%d").to_string();
let start_date_query_string = start_date.unwrap_or(end_date_query_string.clone());
let start_date = NaiveDate::parse_from_str(&start_date_query_string, "%Y-%m-%d")?;
let query = TogglQuery {
start_date: Some(start_date_query_string),
end_date: Some(end_date_query_string),
..Default::default()
};
let report = toggl_client.full_report(&query).await?;
let exclusivity_condition = day_exclusivity_condition(start_date, end_date.date_naive());
cache_report(&db, &report, Some(exclusivity_condition)).await?;
Ok("Ok")
}
fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
time_entry::Column::Start
.between(
start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
)
.into_condition()
}
fn from_csv_row(row: csv::StringRecord) -> ActiveModel {
unimplemented!("Need to refactor db first")
}
async fn import_csv(
Extension(db): Extension<DatabaseConnection>,
mut multipart: Multipart,
) -> Result<impl IntoResponse> {
return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
// while let Some(field) = multipart.next_field().await? {
// // if let Some("csv") = field.name() {
// // let csv = field.bytes().await?;
// // let mut csv = csv::Reader::from_reader(csv.as_ref());
// // let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
// //
// // time_entry::Entity::insert_many(data.collect::<Result<_>>().unwrap())
// // .on_conflict(ReportEntry::grafting_conflict_statement())
// // .exec(&db)
// // .await
// // .unwrap()
// // }
// }
}
#[tokio::main]
async fn main() -> Result<()> {
// install global collector configured based on RUST_LOG env var.
tracing_subscriber::fmt::init();
let config = Config::parse();
let toggl_client = TogglClient::new(
&config.workspace_id.to_string(),
&STANDARD.encode(&format!("{}:api_token", config.toggl_api_token)),
);
let toggl_client =
TogglApiClient::new(&config.workspace_id.to_string(), &config.toggl_api_token);
let db = sea_orm::Database::connect(config.database_url)
.await
@@ -246,14 +58,14 @@ async fn main() -> Result<()> {
// build our application with a route
let app = Router::new()
.route("/import_csv", post(import_csv))
.route("/health", get(health))
.route("/current", get(current))
.route("/refresh", post(refresh))
.route("/report", post(report))
.route("/projects", get(projects))
.route("/clients", get(clients))
.route("/start_time_entry", post(start_time_entry))
.route("/import_csv", post(routes::import_csv))
.route("/health", get(routes::health))
.route("/current", get(routes::current))
.route("/refresh", post(routes::refresh))
.route("/report", post(routes::report))
.route("/projects", get(routes::projects))
.route("/clients", get(routes::clients))
.route("/start_time_entry", post(routes::start_time_entry))
.layer(Extension(toggl_client))
.layer(Extension(db))
.layer(TraceLayer::new_for_http());


@@ -1,23 +1,23 @@
use crate::client::TogglClient;
use crate::entity::{client, project, time_entry};
use crate::types::{Project, ProjectClient, TogglQuery};
use sea_orm::{DatabaseConnection, EntityTrait, QuerySelect};
use crate::entity::time_entry;
use crate::sync_service::{update_database, UpdateStats};
use crate::toggl_api::TogglApiClient;
use crate::utils;
use chrono::{DateTime, FixedOffset};
use migration::Order;
use sea_orm::{DatabaseConnection, EntityTrait, QueryOrder, QuerySelect};
use std::ops::Sub;
use tracing::instrument;
use crate::{day_exclusivity_condition, utils};
#[tracing::instrument(skip(client, db))]
pub async fn poll_job(client: TogglClient, db: DatabaseConnection, poll_period: u64) {
// Every 2h, poll the Toggl API for new time entries for today to cache them in the database
pub async fn poll_job(client: TogglApiClient, db: DatabaseConnection, poll_period: u64) {
// Periodically poll the Toggl API for new time entries for today to cache them in the database
let period = tokio::time::Duration::from_secs(poll_period);
loop {
tracing::info!("Polling Toggl API");
match perform_poll(&client, &db).await {
Ok(report_entries_count) => {
tracing::info!(
"Successfully polled Toggl API: {:?} entries retrieved",
report_entries_count
);
Ok(poll_update_data) => {
tracing::info!("Successfully polled Toggl API: {:?}", poll_update_data);
}
Err(error) => {
@@ -29,60 +29,29 @@ pub async fn poll_job(client: TogglClient, db: DatabaseConnection, poll_period:
}
}
#[instrument(skip(client, db))]
#[instrument(skip(toggl_client, db))]
pub async fn perform_poll(
client: &TogglClient,
toggl_client: &TogglApiClient,
db: &DatabaseConnection,
) -> utils::Result<usize> {
let now = chrono::Utc::now();
let today_string = now
.date_naive()
.format("%Y-%m-%d")
.to_string();
let report = client
.full_report(&TogglQuery {
start_date: Some(today_string.clone()),
end_date: Some(today_string.clone()),
..Default::default()
})
.await?;
let existing_project_ids = project::Entity::find()
) -> utils::Result<UpdateStats> {
let since = time_entry::Entity::find()
.select_only()
.column(project::Column::TogglId)
.into_tuple::<i64>()
.all(db)
.column(time_entry::Column::ServerUpdatedAt)
.order_by(time_entry::Column::ServerUpdatedAt, Order::Desc)
.into_tuple::<DateTime<FixedOffset>>()
.one(db)
.await?;
let new_projects = report
.iter()
.filter_map(|entry| entry.project_id)
.any(|project_id| !existing_project_ids.contains(&(project_id as i64)));
let since = since.unwrap_or(
chrono::Utc::now()
.sub(chrono::Duration::days(1))
.fixed_offset(),
);
if new_projects {
let clients = client.fetch_clients().await?;
let time_entries = toggl_client
.fetch_time_entries_modified_since(since.to_utc())
.await?;
client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
.on_conflict(ProjectClient::grafting_conflict_statement())
.exec(db)
.await?;
let projects = client.fetch_projects().await?;
project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(db)
.await?;
}
crate::cache_report(
&db,
&report,
Some(
day_exclusivity_condition(now.date_naive(), now.date_naive()),
),
)
.await?;
Ok(report.len())
// These are changes only so there is no need to enforce exclusivity
update_database(db, toggl_client, &time_entries, None).await
}

src/routes.rs Normal file (149 changed lines)

@@ -0,0 +1,149 @@
use crate::sync_service::UpdateStats;
use crate::toggl_api::types::{self, Client, Project, ReportRow, TogglReportQuery};
use crate::toggl_api::TogglApiClient;
use crate::{entity, sync_service, utils};
use anyhow::anyhow;
use axum::extract::{Multipart, Query};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::{Extension, Json};
use chrono::{DateTime, Utc};
use sea_orm::{DatabaseConnection, EntityTrait};
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
use tracing::instrument;
#[instrument(skip(db, toggl_client))]
pub async fn report(
Extension(toggl_client): Extension<TogglApiClient>,
Extension(db): Extension<DatabaseConnection>,
Json(query): Json<TogglReportQuery>,
) -> utils::Result<Json<Vec<ReportRow>>> {
let report = toggl_client.full_report(&query).await?;
let time_entries = report
.iter()
.flat_map(|entry| entry.to_time_entries(toggl_client.workspace_id()))
.collect::<Vec<_>>();
sync_service::update_database(&db, &toggl_client, &time_entries, None).await?;
Ok(Json(report))
}
#[instrument(skip(toggl_client))]
pub async fn current(
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<Json<Option<types::TimeEntry>>> {
toggl_client.fetch_current_time_entry().await.map(Json)
}
#[instrument(skip(toggl_client))]
pub async fn start_time_entry(
Extension(toggl_client): Extension<TogglApiClient>,
Json(body): Json<HashMap<String, Value>>,
) -> utils::Result<impl IntoResponse> {
toggl_client.start_time_entry(body).await?;
Ok((StatusCode::OK, "Ok"))
}
#[instrument(skip(db, toggl_client))]
pub async fn projects(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<Json<Vec<Project>>> {
let projects = toggl_client.fetch_projects().await?;
entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(projects))
}
#[instrument(skip(toggl_client, db))]
pub async fn clients(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<Json<Vec<Client>>> {
let clients = toggl_client.fetch_clients().await?;
entity::client::Entity::insert_many(clients.iter().map(Client::as_model))
.on_conflict(Client::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(clients))
}
pub async fn health(
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<&'static str> {
if toggl_client.check_health().await {
Ok("Ok")
} else {
Err(anyhow!("Toggl health check failed").into())
}
}
pub async fn import_csv(
Extension(db): Extension<DatabaseConnection>,
mut multipart: Multipart,
) -> utils::Result<impl IntoResponse> {
return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
// while let Some(field) = multipart.next_field().await? {
// // if let Some("csv") = field.name() {
// // let csv = field.bytes().await?;
// // let mut csv = csv::Reader::from_reader(csv.as_ref());
// // let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
// //
// // time_entry::Entity::insert_many(data.collect::<Result<_>>().unwrap())
// // .on_conflict(ReportEntry::grafting_conflict_statement())
// // .exec(&db)
// // .await
// // .unwrap()
// // }
// }
}
#[derive(Debug, Clone, Deserialize)]
pub struct RefreshQuery {
start_date: Option<DateTime<Utc>>,
end_date: Option<DateTime<Utc>>,
}
#[instrument(skip(toggl_client, db))]
pub async fn refresh(
Extension(toggl_client): Extension<TogglApiClient>,
Extension(db): Extension<DatabaseConnection>,
Query(RefreshQuery {
start_date,
end_date,
}): Query<RefreshQuery>,
) -> utils::Result<Json<UpdateStats>> {
let time_entries = match (start_date, end_date) {
(Some(start_date), Some(end_date)) => {
toggl_client
.fetch_time_entries_in_range(start_date, end_date)
.await?
}
(Some(start_date), None) => {
let end_date = Utc::now();
toggl_client
.fetch_time_entries_in_range(start_date, end_date)
.await?
}
(None, Some(_)) => {
return Err(anyhow!("start_date must be provided if end_date is provided").into());
}
_ => toggl_client.fetch_recent_time_entries().await?,
};
sync_service::update_database(&db, &toggl_client, &time_entries, None)
.await
.map(Json)
}

src/sync_service.rs Normal file (118 changed lines)

@@ -0,0 +1,118 @@
use crate::entity::time_entry::Entity as TimeEntry;
use crate::entity::{client, project, time_entry};
use crate::toggl_api::types::{Client, Project, TimeEntry as ToggleApiTimeEntry};
use crate::toggl_api::TogglApiClient;
use crate::utils;
use migration::Condition;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QuerySelect};
use serde::Serialize;
#[derive(Debug, Serialize)]
pub struct UpdateStats {
retrieved: UpdateStatsInner,
written: UpdateStatsInner,
}
#[derive(Debug, Serialize)]
pub struct UpdateStatsInner {
updated: usize,
deleted: usize,
}
pub async fn update_database(
db: &DatabaseConnection,
toggl_client: &TogglApiClient,
time_entries: &[ToggleApiTimeEntry],
exclusive_on: Option<Condition>,
) -> utils::Result<UpdateStats> {
let (deleted_entries, time_entries) = time_entries
.iter()
.partition::<Vec<_>, _>(|entry| entry.server_deleted_at.is_some());
let retrieved = UpdateStatsInner {
updated: time_entries.len(),
deleted: deleted_entries.len(),
};
let mut written = UpdateStatsInner {
updated: 0,
deleted: 0,
};
let deleted_ids = deleted_entries
.iter()
.map(|entry| entry.id)
.collect::<Vec<_>>();
if !deleted_ids.is_empty() {
let delete_result = TimeEntry::delete_many()
.filter(time_entry::Column::TogglId.is_in(deleted_ids))
.exec(db)
.await?;
written.deleted = delete_result.rows_affected as usize;
}
let existing_project_ids = project::Entity::find()
.select_only()
.column(project::Column::TogglId)
.into_tuple::<i64>()
.all(db)
.await?;
let new_projects = time_entries
.iter()
.filter_map(|entry| entry.project_id)
.any(|project_id| !existing_project_ids.contains(&project_id));
if new_projects {
let clients = toggl_client.fetch_clients().await?;
client::Entity::insert_many(clients.iter().map(Client::as_model))
.on_conflict(Client::grafting_conflict_statement())
.exec(db)
.await?;
let projects = toggl_client.fetch_projects().await?;
project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(db)
.await?;
}
let ids = time_entries
.iter()
.map(|entry| entry.id)
.collect::<Vec<_>>();
let models = time_entries
.into_iter()
.map(|entry| entry.as_model())
.collect::<Vec<_>>();
// TODO: Why is this needed?
if models.is_empty() {
return Ok(UpdateStats { retrieved, written });
}
let insert_result = TimeEntry::insert_many(models)
.on_conflict(ToggleApiTimeEntry::grafting_conflict_statement())
.exec_without_returning(db)
.await?;
written.updated = insert_result as usize;
if let Some(exclusive_on) = exclusive_on {
TimeEntry::delete_many()
.filter(
Condition::all()
.add(exclusive_on)
.add(time_entry::Column::TogglId.is_in(ids).not()),
)
.exec(db)
.await?;
}
Ok(UpdateStats { retrieved, written })
}

src/toggl_api/api_client.rs Normal file (229 changed lines)

@@ -0,0 +1,229 @@
use crate::toggl_api::types::{
Client as ProjectClient, Project, ReportRow, TimeEntry, TogglReportQuery,
};
use anyhow::anyhow;
use axum::http::StatusCode;
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use chrono::{DateTime, Utc};
use hyper::HeaderMap;
use reqwest::header::HeaderValue;
use reqwest::{Client, RequestBuilder, Response};
use serde_json::Value;
use std::collections::HashMap;
use std::time::Duration;
use tracing::instrument;
use tracing::log::debug;
#[derive(Debug, Clone)]
pub struct TogglApiClient {
client: Client,
workspace_id: String,
base_url: String,
reports_base_url: String,
}
impl TogglApiClient {
async fn make_request(&self, request_builder: RequestBuilder) -> crate::Result<Response> {
loop {
let builder = request_builder
.try_clone()
.ok_or(anyhow!("Failed to clone request builder"))?;
let response = self.client.execute(builder.build()?).await?;
// If we are rate limited, wait a bit and try again
if response.status() == StatusCode::TOO_MANY_REQUESTS {
tokio::time::sleep(Duration::from_secs(5)).await;
} else {
return Ok(response);
}
}
}
pub fn workspace_id(&self) -> i64 {
self.workspace_id.parse().unwrap()
}
pub async fn check_health(&self) -> bool {
true
}
pub fn new(workspace_id: &str, api_token: &str) -> Self {
let toggl_auth = &STANDARD.encode(format!("{}:api_token", api_token));
let client = Client::builder()
.default_headers(Self::default_headers(toggl_auth))
.build()
.expect("Failed to build reqwest client");
Self {
client,
workspace_id: workspace_id.to_string(),
base_url: "https://api.track.toggl.com/api/v9".to_string(),
reports_base_url: "https://api.track.toggl.com/reports/api/v3".to_string(),
}
}
fn default_headers(toggl_auth: &str) -> HeaderMap {
let mut headers = HeaderMap::new();
let mut value = HeaderValue::from_str(&format!("Basic {}", toggl_auth)).unwrap();
value.set_sensitive(true);
headers.insert("Authorization", value);
headers
}
pub async fn fetch_projects(&self) -> crate::Result<Vec<Project>> {
let url = format!(
"{base_url}/workspaces/{}/projects",
self.workspace_id,
base_url = self.base_url,
);
let projects = self
.make_request(self.client.get(&url))
.await?
.json::<Vec<Project>>()
.await?;
Ok(projects)
}
pub async fn fetch_clients(&self) -> crate::Result<Vec<ProjectClient>> {
let url = format!(
"{base_url}/workspaces/{}/clients",
self.workspace_id,
base_url = self.base_url,
);
let clients = self
.make_request(self.client.get(&url))
.await?
.json::<Vec<ProjectClient>>()
.await?;
Ok(clients)
}
pub async fn fetch_recent_time_entries(&self) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(self
.make_request(self.client.get(url))
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_time_entries_modified_since(
&self,
date_time: DateTime<Utc>,
) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(self
.make_request(
self.client
.get(url)
.query(&[("since", date_time.timestamp())]),
)
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_time_entries_in_range(
&self,
start: DateTime<Utc>,
end: DateTime<Utc>,
) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(self
.make_request(self.client.get(url).query(&[
("start_date", start.to_rfc3339()),
("end_date", end.to_rfc3339()),
]))
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_current_time_entry(&self) -> crate::Result<Option<TimeEntry>> {
let url = format!(
"{base_url}/me/time_entries/current",
base_url = self.base_url
);
let res = self
.make_request(self.client.get(url))
.await?
.json::<Option<TimeEntry>>()
.await?;
Ok(res)
}
pub async fn start_time_entry(&self, mut body: HashMap<String, Value>) -> crate::Result<()> {
let url = format!(
"{base_url}/workspaces/{workspace_id}/time_entries",
base_url = self.base_url,
workspace_id = self.workspace_id
);
body.insert(
"workspace_id".to_string(),
self.workspace_id.parse::<i32>()?.into(),
);
self.make_request(self.client.post(url).json(&body)).await?;
Ok(())
}
/////////////
// Reports //
/////////////
fn paginate_filters(original_filters: &TogglReportQuery, last_row_id: u64) -> TogglReportQuery {
let mut filters: TogglReportQuery = original_filters.clone();
filters.first_row_number = Some(last_row_id + 1);
filters
}
#[instrument(skip(self, filters))]
pub async fn full_report(&self, filters: &TogglReportQuery) -> crate::Result<Vec<ReportRow>> {
let url = format!(
"{base_url}/workspace/{workspace_id}/search/time_entries",
base_url = self.reports_base_url,
workspace_id = self.workspace_id
);
let mut last_row_number = Some(0);
let mut results = vec![];
while let Some(last_row_number_n) = last_row_number {
debug!("Fetching page starting with {}", last_row_number_n);
// If we are not on the first page, wait a bit to avoid rate limiting
if last_row_number_n != 0 {
tokio::time::sleep(Duration::from_secs(1)).await;
}
// TODO: Implement rate limiting
let response = self
.make_request(
self.client
.post(&url)
.json(&Self::paginate_filters(filters, last_row_number_n)),
)
.await?;
let data = response.json::<Vec<ReportRow>>().await?;
last_row_number = data.last().map(|e| e.row_number as u64);
data.into_iter().for_each(|e| results.push(e));
}
Ok(results)
}
}

src/toggl_api/mod.rs Normal file (4 changed lines)

@@ -0,0 +1,4 @@
pub mod api_client;
pub mod types;
pub use api_client::TogglApiClient;


@@ -1,21 +1,12 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use serde_with::skip_serializing_none;
use std::collections::HashMap;
use std::option::Option;
use chrono::{DateTime, Utc};
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct TimeEntry {
pub id: u64,
pub seconds: u32,
pub start: String,
pub stop: String,
pub at: String,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct ReportEntry {
pub struct ReportRow {
pub user_id: u32,
pub username: String,
pub project_id: Option<u64>,
@@ -26,23 +17,40 @@ pub struct ReportEntry {
pub billable_amount_in_cents: Option<u64>,
pub hourly_rate_in_cents: Option<u64>,
pub currency: String,
pub time_entries: Vec<TimeEntry>,
pub time_entries: Vec<ReportRowInnerTimeEntry>,
pub row_number: u32,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Current {
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct ReportRowInnerTimeEntry {
pub id: u64,
pub workspace_id: u64,
pub project_id: Option<u64>,
pub task_id: Option<u64>,
pub seconds: u32,
pub start: DateTime<Utc>,
pub stop: DateTime<Utc>,
pub at: DateTime<Utc>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimeEntry {
pub id: i64,
pub workspace_id: i64,
pub project_id: Option<i64>,
pub task_id: Option<i64>,
pub billable: bool,
pub start: String,
pub stop: Option<String>,
pub start: DateTime<Utc>,
pub stop: Option<DateTime<Utc>>,
pub duration: i64,
pub description: String,
pub tags: Vec<String>,
pub tag_ids: Vec<u64>,
pub tag_ids: Vec<i64>,
pub at: DateTime<Utc>,
pub server_deleted_at: Option<DateTime<Utc>>,
pub user_id: i64,
// Ignored fields
// duronly: bool,
// uid: i64,
// wid: i64,
// pid: Option<i64>,
}
#[derive(Debug, Serialize, Deserialize)]
@@ -52,14 +60,34 @@ pub struct Project {
pub client_id: Option<u64>,
pub name: String,
pub active: bool,
#[serde(flatten)]
pub rest: HashMap<String, Value>,
pub color: String,
pub at: DateTime<Utc>,
pub server_deleted_at: Option<DateTime<Utc>>,
pub created_at: DateTime<Utc>,
// cid: Option<serde_json::Value>,
// wid: i64,
// rate: Option<serde_json::Value>,
// status: String,
// billable: Option<serde_json::Value>,
// currency: Option<serde_json::Value>,
// template: Option<serde_json::Value>,
// fixed_fee: Option<serde_json::Value>,
// recurring: bool,
// is_private: bool,
// start_date: String,
// template_id: Option<serde_json::Value>,
// actual_hours: i64,
// actual_seconds: i64,
// auto_estimates: Option<serde_json::Value>,
// estimated_hours: Option<serde_json::Value>,
// estimated_seconds: Option<serde_json::Value>,
// rate_last_updated: Option<serde_json::Value>,
// recurring_parameters: Option<serde_json::Value>,
}
/// Represents a client in Toggl.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProjectClient {
pub struct Client {
/// Indicates whether the client is archived or not.
pub archived: bool,
@@ -79,10 +107,9 @@ pub struct ProjectClient {
pub wid: i32,
}
#[allow(non_snake_case)]
#[skip_serializing_none]
#[derive(Serialize, Deserialize, Clone, Default)]
pub struct TogglQuery {
pub struct TogglReportQuery {
pub billable: Option<bool>,
pub client_ids: Option<Vec<u64>>,
pub description: Option<String>,
@@ -97,11 +124,13 @@ pub struct TogglQuery {
pub min_duration_seconds: Option<u64>,
pub order_by: Option<String>,
pub order_dir: Option<String>,
pub postedFields: Option<Vec<String>>,
#[serde(rename = "postedFields")]
pub posted_fields: Option<Vec<String>>,
pub project_ids: Option<Vec<u64>>,
pub rounding: Option<u64>,
pub rounding_minutes: Option<u64>,
pub startTime: Option<String>,
#[serde(rename = "startTime")]
pub start_time: Option<String>,
pub start_date: Option<String>,
pub tag_ids: Option<Vec<u64>>,
pub task_ids: Option<Vec<u64>>,
@@ -114,7 +143,7 @@ pub struct TogglQuery {
use std::fmt;
impl fmt::Debug for TogglQuery {
impl fmt::Debug for TogglReportQuery {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut ds = f.debug_struct("TogglQuery");
@@ -160,8 +189,8 @@ impl fmt::Debug for TogglQuery {
if let Some(order_dir) = &self.order_dir {
ds.field("order_dir", order_dir);
}
if let Some(postedFields) = &self.postedFields {
ds.field("postedFields", postedFields);
if let Some(posted_fields) = &self.posted_fields {
ds.field("postedFields", posted_fields);
}
if let Some(project_ids) = &self.project_ids {
ds.field("project_ids", project_ids);
@@ -172,8 +201,8 @@ impl fmt::Debug for TogglQuery {
if let Some(rounding_minutes) = &self.rounding_minutes {
ds.field("rounding_minutes", rounding_minutes);
}
if let Some(startTime) = &self.startTime {
ds.field("startTime", startTime);
if let Some(start_time) = &self.start_time {
ds.field("startTime", start_time);
}
if let Some(start_date) = &self.start_date {
ds.field("start_date", start_date);


@@ -1,5 +1,9 @@
use crate::entity::time_entry;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use chrono::{NaiveDate, NaiveTime};
use migration::{Condition, IntoCondition};
use sea_orm::ColumnTrait;
use tokio::signal;
#[derive(Debug)]
@@ -52,3 +56,12 @@ pub async fn shutdown_signal() {
_ = terminate => {},
}
}
pub fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
time_entry::Column::Start
.between(
start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
)
.into_condition()
}