Compare commits


No commits in common. "5f2120ea1834643ee3780857eb29d252683ac4da" and "8b2f0102c4c539da9aeb4f4ff94cc37ede854e57" have entirely different histories.

28 changed files with 507 additions and 3771 deletions

.env

@@ -1,4 +1 @@
 DATABASE_URL=postgres://postgres@localhost:5432/toggl_portal
-TOGGL_API_TOKEN=237918c4e008f5aeefe886c9112ab560
-TOGGL_WORKSPACE_ID=2837131
-WORKSPACE_ID=${TOGGL_WORKSPACE_ID}

View File

@@ -30,7 +30,7 @@ jobs:
       with:
         context: .
         push: true
-        tags: git.joshuacoles.me/personal/toggl-portal:arm
+        tags: git.joshuacoles.me/personal/beachhead-services/toggl-portal:arm
         build-args: |
           APP_NAME=toggl-portal
           PACKAGE_NAME=toggl-portal

.gitignore

@@ -1,4 +1,3 @@
 /target
-/migration/target
 /.idea
 /ignore

Cargo.lock

@ -1630,7 +1630,6 @@ version = "0.1.0"
dependencies = [ dependencies = [
"async-std", "async-std",
"sea-orm-migration", "sea-orm-migration",
"serde_json",
] ]
[[package]] [[package]]

View File

@@ -1,10 +0,0 @@
- Possible sources
  - report
  - me/time_entries
    - This will need to filter by `workspace_id` if we don't want to deal with multiple workspaces.
  - csv
    - This can give us a complete picture of the time entries, but it lacks an `id` field so it cannot be easily updated.
- The first two contain `at`, which is when the time entry was last updated; this is useful for grafting.
  - `me/time_entries` can get anything **updated** since a given time.
    - This is incredibly useful for updating the time entries.
  - Most historic time entries are never changed, so a CSV of, say, 2023 is probably always going to be valid.
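For orientation, the updated-since pull these notes describe is a single authenticated GET; a minimal sketch assuming reqwest with its json feature enabled (the function name and return type are illustrative, not this repository's code; the endpoint and Basic token format follow Toggl's v9 API, and the same call appears as fetch_time_entries_modified_since in the deleted api_client.rs further down):

    use reqwest::Client;
    use serde_json::Value;

    // Pull every time entry *updated* since a Unix timestamp, per the note above.
    // `token` is assumed to be base64("{api_token}:api_token"), the Basic-auth
    // form Toggl's API expects.
    async fn fetch_updated_since(token: &str, since: i64) -> reqwest::Result<Vec<Value>> {
        Client::new()
            .get("https://api.track.toggl.com/api/v9/me/time_entries")
            .query(&[("since", since)])
            .header("Authorization", format!("Basic {token}"))
            .send()
            .await?
            .json()
            .await
    }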

README.md

@@ -1,5 +0,0 @@
# Toggl Portal

## Purpose

- Act as an authenticated client for other apps to use to access Toggl data, in addition to caching this data for other data analysis purposes.

migration/Cargo.lock

File diff suppressed because it is too large.

migration/Cargo.toml

@ -10,12 +10,10 @@ path = "src/lib.rs"
[dependencies] [dependencies]
async-std = { version = "1", features = ["attributes", "tokio1"] } async-std = { version = "1", features = ["attributes", "tokio1"] }
serde_json = "^1.0"
[dependencies.sea-orm-migration] [dependencies.sea-orm-migration]
version = "0.12.0" version = "0.12.0"
features = [ features = [
"runtime-tokio-rustls", # `ASYNC_RUNTIME` feature "runtime-tokio-rustls", # `ASYNC_RUNTIME` feature
"sqlx-postgres", # `DATABASE_DRIVER` feature "sqlx-postgres", # `DATABASE_DRIVER` feature
'with-json'
] ]

migration/src/lib.rs

@@ -4,8 +4,6 @@ mod m20231101_172500_create_time_entry_table;
 mod m20231106_134950_create_clients;
 mod m20231106_195401_create_projects;
 mod m20231106_201029_add_time_entry_project_fk;
-mod m20240302_102418_update_project_table;
-mod m20240302_171651_update_time_entry_table;

 pub struct Migrator;
@@ -17,8 +15,6 @@ impl MigratorTrait for Migrator {
             Box::new(m20231106_134950_create_clients::Migration),
             Box::new(m20231106_195401_create_projects::Migration),
             Box::new(m20231106_201029_add_time_entry_project_fk::Migration),
-            Box::new(m20240302_102418_update_project_table::Migration),
-            Box::new(m20240302_171651_update_time_entry_table::Migration),
         ]
     }

migration/src/m20231101_172500_create_time_entry_table.rs

@@ -19,23 +19,11 @@ impl MigrationTrait for Migration {
                             .primary_key(),
                     )
                     .col(
-                        ColumnDef::new(TimeEntry::TogglId)
-                            .big_unsigned()
-                            .not_null()
-                            .unique_key(),
-                    )
+                        ColumnDef::new(TimeEntry::TogglId).big_unsigned().not_null().unique_key())
                     .col(ColumnDef::new(TimeEntry::Description).string().not_null())
                     .col(ColumnDef::new(TimeEntry::ProjectId).big_unsigned())
-                    .col(
-                        ColumnDef::new(TimeEntry::Start)
-                            .timestamp_with_time_zone()
-                            .not_null(),
-                    )
-                    .col(
-                        ColumnDef::new(TimeEntry::Stop)
-                            .timestamp_with_time_zone()
-                            .not_null(),
-                    )
+                    .col(ColumnDef::new(TimeEntry::Start).timestamp_with_time_zone().not_null())
+                    .col(ColumnDef::new(TimeEntry::Stop).timestamp_with_time_zone().not_null())
                     .col(ColumnDef::new(TimeEntry::RawJson).json_binary().not_null())
                     .to_owned(),
             )
@@ -58,5 +46,5 @@ enum TimeEntry {
     ProjectId,
     Start,
     Stop,
-    RawJson,
+    RawJson
 }

migration/src/m20231106_134950_create_clients.rs

@@ -20,11 +20,7 @@ impl MigrationTrait for Migration {
                     .col(ColumnDef::new(Client::Name).string().not_null())
                     .col(ColumnDef::new(Client::Archived).boolean().not_null())
                     .col(ColumnDef::new(Client::WorkspaceId).integer().not_null())
-                    .col(
-                        ColumnDef::new(Client::At)
-                            .timestamp_with_time_zone()
-                            .not_null(),
-                    )
+                    .col(ColumnDef::new(Client::At).timestamp_with_time_zone().not_null())
                     .col(ColumnDef::new(Client::ServerDeletedAt).timestamp_with_time_zone())
                     .to_owned(),
             )
@@ -46,5 +42,5 @@ enum Client {
     Archived,
     WorkspaceId,
     At,
-    ServerDeletedAt,
+    ServerDeletedAt
 }

migration/src/m20231106_195401_create_projects.rs

@@ -11,19 +11,8 @@ impl MigrationTrait for Migration {
                 Table::create()
                     .table(Project::Table)
                     .if_not_exists()
-                    .col(
-                        ColumnDef::new(Project::Id)
-                            .integer()
-                            .primary_key()
-                            .auto_increment()
-                            .not_null(),
-                    )
-                    .col(
-                        ColumnDef::new(Project::TogglId)
-                            .big_unsigned()
-                            .not_null()
-                            .unique_key(),
-                    )
+                    .col(ColumnDef::new(Project::Id).integer().primary_key().auto_increment().not_null())
+                    .col(ColumnDef::new(Project::TogglId).big_unsigned().not_null().unique_key())
                     .col(
                         ColumnDef::new(Project::WorkspaceId)
                             .big_unsigned()
@@ -38,15 +27,13 @@ impl MigrationTrait for Migration {
             .await?;

         // Create foreign key
-        manager
-            .create_foreign_key(
-                ForeignKey::create()
-                    .name("project_client_id")
-                    .from(Project::Table, Project::ClientId)
-                    .to(Client::Table, Client::Id)
-                    .to_owned(),
-            )
-            .await?;
+        manager.create_foreign_key(
+            ForeignKey::create()
+                .name("project_client_id")
+                .from(Project::Table, Project::ClientId)
+                .to(Client::Table, Client::Id)
+                .to_owned(),
+        ).await?;

         Ok(())
     }

migration/src/m20240302_102418_update_project_table.rs

@@ -1,69 +0,0 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.add_column(ColumnDef::new(Project::Color).text())
.add_column(ColumnDef::new(Project::ServerCreatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerUpdatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerDeletedAt).timestamp_with_time_zone())
.to_owned(),
)
.await?;
manager
.get_connection()
.execute_unprepared(
r#"
update "project"
set "color" = raw_json ->> 'color',
"server_created_at" = (raw_json ->> 'created_at') :: timestamptz,
"server_updated_at" = (raw_json ->> 'at') :: timestamptz,
"server_deleted_at" = (raw_json ->> 'server_deleted_at') :: timestamptz
"#,
)
.await?;
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.modify_column(ColumnDef::new(Project::Color).not_null())
.modify_column(ColumnDef::new(Project::ServerCreatedAt).not_null())
.modify_column(ColumnDef::new(Project::ServerUpdatedAt).not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.drop_column(Project::Color)
.drop_column(Project::ServerCreatedAt)
.drop_column(Project::ServerUpdatedAt)
.drop_column(Project::ServerDeletedAt)
.to_owned(),
)
.await
}
}
#[derive(DeriveIden)]
enum Project {
Table,
Color,
ServerCreatedAt,
ServerUpdatedAt,
ServerDeletedAt,
}

migration/src/m20240302_171651_update_time_entry_table.rs

@@ -1,70 +0,0 @@
use sea_orm_migration::prelude::*;
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.add_column(
ColumnDef::new(TimeEntry::Tags)
.json_binary()
.default(serde_json::json!([]))
.not_null(),
)
.add_column(
ColumnDef::new(TimeEntry::ServerUpdatedAt).timestamp_with_time_zone(),
)
.add_column(
ColumnDef::new(TimeEntry::ServerDeletedAt).timestamp_with_time_zone(),
)
.to_owned(),
)
.await?;
manager
.get_connection()
.execute_unprepared(
r#"
update "time_entry"
set "tags" = coalesce(raw_json -> 'tags', '[]' :: jsonb),
"server_updated_at" = (raw_json ->> 'at') :: timestamptz;
"#,
)
.await?;
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.modify_column(ColumnDef::new(TimeEntry::ServerUpdatedAt).not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.drop_column(TimeEntry::Tags)
.drop_column(TimeEntry::ServerDeletedAt)
.drop_column(TimeEntry::ServerUpdatedAt)
.to_owned(),
)
.await
}
}
#[derive(DeriveIden)]
enum TimeEntry {
Table,
ServerUpdatedAt,
ServerDeletedAt,
Tags,
}

src/client.rs

@@ -0,0 +1,178 @@
use reqwest::Client;
use serde_json::Value;
use std::collections::HashMap;
use std::time::Duration;
use hyper::HeaderMap;
use tracing::instrument;
use tracing::log::debug;
use crate::types::{Current, Project, ProjectClient, ReportEntry, TogglQuery};
#[derive(Debug, Clone)]
pub struct TogglClient {
client: Client,
workspace_id: String,
base_url: String,
reports_base_url: String,
headers: HeaderMap,
}
impl TogglClient {
pub async fn check_health(&self) -> bool {
true
}
pub fn new(workspace_id: &str, toggl_auth: &str) -> Self {
let client = Client::builder()
.default_headers(Self::default_headers(toggl_auth))
.build()
.expect("Failed to build reqwest client");
Self {
client,
workspace_id: workspace_id.to_string(),
base_url: "https://api.track.toggl.com/api/v9".to_string(),
reports_base_url: "https://api.track.toggl.com/reports/api/v3".to_string(),
headers: Self::default_headers(toggl_auth),
}
}
fn default_headers(toggl_auth: &str) -> reqwest::header::HeaderMap {
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
"Authorization",
reqwest::header::HeaderValue::from_str(&format!("Basic {}", toggl_auth)).unwrap(),
);
headers
}
pub async fn fetch_projects(&self) -> Result<Vec<Project>, reqwest::Error> {
let url = format!(
"{base_url}/workspaces/{}/projects",
self.workspace_id,
base_url = self.base_url,
);
let res = self
.client
.get(&url)
.headers(self.headers.clone())
.send()
.await?
.json::<Vec<Project>>()
.await
.unwrap();
Ok(res)
}
pub async fn fetch_clients(&self) -> Result<Vec<ProjectClient>, reqwest::Error> {
let url = format!(
"{base_url}/workspaces/{}/clients",
self.workspace_id,
base_url = self.base_url,
);
let res = self
.client
.get(&url)
.headers(self.headers.clone())
.send()
.await?
.json::<Vec<ProjectClient>>()
.await
.unwrap();
Ok(res)
}
pub async fn get_current(&self) -> Result<Option<Current>, reqwest::Error> {
let url = format!(
"{base_url}/me/time_entries/current",
base_url = self.base_url
);
let res = self
.client
.get(url)
.send()
.await?
.json::<Option<Current>>()
.await
.unwrap();
Ok(res)
}
fn create_filters(original_filters: &TogglQuery, last_row_id: u64) -> TogglQuery {
let mut filters: TogglQuery = original_filters.clone();
filters.first_row_number = Some(last_row_id + 1);
filters
}
#[instrument(skip(self, filters))]
pub async fn full_report(
&self,
filters: &TogglQuery,
) -> anyhow::Result<Vec<ReportEntry>> {
let url = format!(
"{base_url}/workspace/{workspace_id}/search/time_entries",
base_url = self.reports_base_url,
workspace_id = self.workspace_id
);
let mut last_row_number = Some(0);
let mut results = vec![];
while let Some(last_row_number_n) = last_row_number {
debug!("Fetching page starting with {}", last_row_number_n);
// If we are not on the first page, wait a bit to avoid rate limiting
if last_row_number_n != 0 {
tokio::time::sleep(Duration::from_millis(1000)).await;
}
// TODO: Implement rate limiting
let response = self
.client
.post(&url)
.headers(self.headers.clone())
.json(&Self::create_filters(&filters, last_row_number_n))
.send()
.await?;
let data = response
.json::<Vec<ReportEntry>>()
.await?;
last_row_number = data.last().map(|e| e.row_number as u64);
data.into_iter().for_each(|e| results.push(e));
}
Ok(results)
}
pub async fn start_time_entry(&self, mut body: HashMap<String, Value>) -> anyhow::Result<()> {
let url = format!(
"{base_url}/workspaces/{workspace_id}/time_entries",
base_url = self.base_url,
workspace_id = self.workspace_id
);
body.insert(
"workspace_id".to_string(),
self.workspace_id.parse::<i32>().unwrap().into(),
);
dbg!(self.client
.post(url)
.headers(self.headers.clone())
.json(&body)
.send()
.await?
.text()
.await?);
Ok(())
}
}
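For orientation, a hypothetical caller of the client added above. full_report pages by feeding the last row_number it saw back in as first_row_number until an empty page comes back; the workspace id and dates below are made up, and the token handling mirrors main() elsewhere in this diff:

    use base64::engine::general_purpose::STANDARD;
    use base64::Engine;

    // Illustrative only: TogglClient and TogglQuery are the types from this diff.
    async fn print_week(api_token: &str) -> anyhow::Result<()> {
        let auth = STANDARD.encode(format!("{api_token}:api_token"));
        let client = TogglClient::new("1234567", &auth); // workspace id is made up

        let query = TogglQuery {
            start_date: Some("2024-02-26".to_string()),
            end_date: Some("2024-03-03".to_string()),
            ..Default::default()
        };

        // full_report drives the pagination loop itself.
        for entry in client.full_report(&query).await? {
            println!("{}: {} inner entries", entry.description, entry.time_entries.len());
        }
        Ok(())
    }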

src/csv_parser.rs

@@ -1,69 +0,0 @@
use crate::utils::Result;
use anyhow::anyhow;
use chrono::{NaiveDate, NaiveTime};
use csv::StringRecord;
mod headings {
pub const USER: usize = 1;
pub const USER_EMAIL: usize = 2;
pub const CLIENT_NAME: usize = 3;
pub const PROJECT_NAME: usize = 4;
pub const TASK_NAME: usize = 5;
pub const DESCRIPTION: usize = 6;
pub const BILLABLE: usize = 7;
pub const START_DATE: usize = 8;
pub const END_DATE: usize = 9;
pub const START_TIME: usize = 10;
pub const END_TIME: usize = 11;
pub const DURATION: usize = 12;
pub const TAGS: usize = 13;
}
fn parse_csv_row(row: StringRecord) -> Result<crate::entity::time_entry::Model> {
let start_date = row
.get(headings::START_DATE)
.ok_or(anyhow!("Missing start date in CSV"))?;
let start_time = row
.get(headings::START_TIME)
.ok_or(anyhow!("Missing start time in CSV"))?;
let end_date = row
.get(headings::END_DATE)
.ok_or(anyhow!("Missing end date in CSV"))?;
let end_time = row
.get(headings::END_TIME)
.ok_or(anyhow!("Missing end time in CSV"))?;
let start_time = NaiveTime::parse_from_str(start_time, "%H:%M:%S")?;
let end_time = NaiveTime::parse_from_str(end_time, "%H:%M:%S")?;
let start_date = NaiveDate::parse_from_str(start_date, "%Y-%m-%d")?;
let end_date = NaiveDate::parse_from_str(end_date, "%Y-%m-%d")?;
let start = start_date.and_time(start_time);
let end = end_date.and_time(end_time);
let description = row
.get(headings::DESCRIPTION)
.ok_or(anyhow!("Missing description in CSV"))?;
let project_name = row
.get(headings::PROJECT_NAME)
.ok_or(anyhow!("Missing project name in CSV"))?;
let client_name = row
.get(headings::CLIENT_NAME)
.ok_or(anyhow!("Missing client name in CSV"))?;
let tags = row
.get(headings::TAGS)
.ok_or(anyhow!("Missing tags in CSV"))?;
let task_name = row
.get(headings::TASK_NAME)
.ok_or(anyhow!("Missing task name in CSV"))?;
let billable = match row
.get(headings::BILLABLE)
.ok_or(anyhow!("Missing billable in CSV"))?
{
"Yes" => true,
"No" => false,
_ => unimplemented!("Unknown billable value"),
};
unimplemented!("Refactor model to support non-json sources")
}

src/db.rs

@@ -1,47 +1,23 @@
 use crate::entity::{client, project, time_entry};
-use crate::toggl_api::types::{Client, Project, ReportRow, TimeEntry};
+use crate::types::{Project, ProjectClient, ReportEntry, TimeEntry};
 use sea_orm::sea_query::OnConflict;
 use sea_orm::{NotSet, Set};

-impl ReportRow {
-    pub fn to_time_entries(&self, workspace_id: i64) -> Vec<TimeEntry> {
+impl ReportEntry {
+    pub(crate) fn as_models(&self) -> Vec<time_entry::ActiveModel> {
         self.time_entries
             .iter()
-            .map(|inner| TimeEntry {
-                id: inner.id as i64,
-                description: self.description.clone(),
-                project_id: self.project_id.map(|id| id as i64),
-                task_id: self.task_id.map(|id| id as i64),
-                billable: self.billable,
-                start: inner.start,
-                stop: Some(inner.stop),
-                at: inner.at,
-                server_deleted_at: None,
-                tags: vec![], // TODO: tags on report row import, need to track in separate table
-                workspace_id,
-                duration: inner.seconds as i64,
-                tag_ids: self.tag_ids.iter().map(|ids| *ids as i64).collect(),
-                user_id: self.user_id as i64,
+            .map(|inner| time_entry::ActiveModel {
+                id: NotSet,
+                toggl_id: Set(inner.id as i64),
+                description: Set(self.description.clone()),
+                project_id: Set(self.project_id.map(|id| id as i64)),
+                start: Set(chrono::DateTime::parse_from_rfc3339(&inner.start).unwrap()),
+                stop: Set(chrono::DateTime::parse_from_rfc3339(&inner.stop).unwrap()),
+                raw_json: Set(serde_json::to_value(inner).unwrap()),
             })
             .collect()
     }
-}
-
-impl TimeEntry {
-    pub(crate) fn as_model(&self) -> time_entry::ActiveModel {
-        time_entry::ActiveModel {
-            id: NotSet,
-            toggl_id: Set(self.id),
-            description: Set(self.description.clone()),
-            project_id: Set(self.project_id),
-            start: Set(self.start.fixed_offset()),
-            stop: Set(self.stop.unwrap().fixed_offset()),
-            raw_json: Set(serde_json::to_value(self).unwrap()),
-            server_updated_at: Set(self.at.fixed_offset()),
-            server_deleted_at: Set(self.server_deleted_at.map(|dt| dt.fixed_offset())),
-        }
-    }

     pub fn grafting_conflict_statement() -> OnConflict {
         OnConflict::column(time_entry::Column::TogglId)
@@ -51,23 +27,20 @@ impl TimeEntry {
                 time_entry::Column::Start,
                 time_entry::Column::Stop,
                 time_entry::Column::RawJson,
-                time_entry::Column::ServerUpdatedAt,
-                time_entry::Column::ServerDeletedAt,
-                time_entry::Column::Tags,
             ])
             .to_owned()
     }
 }

-impl Client {
+impl ProjectClient {
     pub fn as_model(&self) -> client::ActiveModel {
         client::ActiveModel {
             id: Set(self.id),
             name: Set(self.name.clone()),
-            archived: Set(self.archived),
+            archived: Set(self.archived.clone()),
             workspace_id: Set(self.wid),
             at: Set(self.at.clone().fixed_offset()),
-            server_deleted_at: Set(self.server_deleted_at.map(|dt| dt.fixed_offset())),
+            server_deleted_at: Set(self.server_deleted_at.clone().map(|dt| dt.fixed_offset())),
         }
     }
@@ -94,10 +67,6 @@ impl Project {
             client_id: Set(self.client_id.map(|id| id as i32)),
             workspace_id: Set(self.workspace_id as i64),
             raw_json: Set(serde_json::to_value(self).unwrap()),
-            color: Set(self.color.clone()),
-            server_created_at: Set(self.created_at.clone().fixed_offset()),
-            server_updated_at: Set(self.at.clone().fixed_offset()),
-            server_deleted_at: Set(self.server_deleted_at.map(|dt| dt.fixed_offset())),
         }
     }
@@ -109,10 +78,6 @@ impl Project {
                 project::Column::ClientId,
                 project::Column::WorkspaceId,
                 project::Column::RawJson,
-                project::Column::Color,
-                project::Column::ServerCreatedAt,
-                project::Column::ServerUpdatedAt,
-                project::Column::ServerDeletedAt,
             ])
             .to_owned()
     }
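The "grafting" statements above are Postgres upserts: on a toggl_id collision the cached columns are overwritten instead of the insert failing, so repeated fetches of overlapping windows converge on the latest data. Consolidated for orientation (a sketch; the real definition lives on ReportEntry above, and time_entry is this crate's entity module):

    use sea_orm::sea_query::OnConflict;

    // Treat toggl_id as the natural key and refresh the cached columns
    // whenever a row with the same Toggl id is inserted again.
    fn graft_time_entries() -> OnConflict {
        OnConflict::column(time_entry::Column::TogglId)
            .update_columns([
                time_entry::Column::Description,
                time_entry::Column::ProjectId,
                time_entry::Column::Start,
                time_entry::Column::Stop,
                time_entry::Column::RawJson,
            ])
            .to_owned()
    }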

src/entity/prelude.rs

@@ -1,5 +1,4 @@
 //! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.2

-#![allow(unused_imports)]
 pub use super::client::Entity as Client;
 pub use super::project::Entity as Project;

src/entity/project.rs

@@ -16,11 +16,6 @@ pub struct Model {
     pub active: bool,
     #[sea_orm(column_type = "JsonBinary")]
     pub raw_json: Json,
-    #[sea_orm(column_type = "Text")]
-    pub color: String,
-    pub server_created_at: DateTimeWithTimeZone,
-    pub server_updated_at: DateTimeWithTimeZone,
-    pub server_deleted_at: Option<DateTimeWithTimeZone>,
 }

 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]

src/entity/time_entry.rs

@@ -16,10 +16,6 @@ pub struct Model {
     pub stop: DateTimeWithTimeZone,
     #[sea_orm(column_type = "JsonBinary")]
     pub raw_json: Json,
-    #[sea_orm(column_type = "JsonBinary")]
-    pub tags: Json,
-    pub server_updated_at: DateTimeWithTimeZone,
-    pub server_deleted_at: Option<DateTimeWithTimeZone>,
 }

 #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]

src/main.rs

@@ -1,19 +1,35 @@
-use crate::toggl_api::TogglApiClient;
+use crate::client::TogglClient;
+use crate::entity::prelude::TimeEntry;
+use crate::entity::time_entry;
+use crate::entity::time_entry::ActiveModel;
+use crate::types::{Current, Project, ProjectClient, ReportEntry, TogglQuery};
+use anyhow::anyhow;
+use axum::extract::multipart::Field;
+use axum::extract::{Multipart, Query};
+use axum::http::StatusCode;
+use axum::response::IntoResponse;
 use axum::routing::{get, post};
-use axum::{Extension, Router};
+use axum::{Extension, Json, Router};
+use base64::engine::general_purpose::STANDARD;
+use base64::Engine;
+use utils::{shutdown_signal, Result};
+use chrono::{NaiveDate, NaiveTime};
 use clap::Parser;
 use migration::{Migrator, MigratorTrait};
+use sea_orm::sea_query::IntoCondition;
+use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter};
+use serde::Deserialize;
+use serde_json::Value;
+use std::collections::HashMap;
 use std::net::SocketAddr;
 use tower_http::trace::TraceLayer;
-use utils::{shutdown_signal, Result};
+use tracing::{debug, instrument};

-mod csv_parser;
+mod client;
 mod db;
 mod entity;
 mod poll;
-mod routes;
-mod sync_service;
-mod toggl_api;
+mod types;
 mod utils;

 #[derive(Debug, Clone, Parser)]
@@ -35,14 +51,186 @@ struct Config {
     poll_period: u64,
 }

+#[instrument(skip(db, toggl_client))]
+pub async fn report(
+    Extension(toggl_client): Extension<TogglClient>,
+    Extension(db): Extension<DatabaseConnection>,
+    Json(query): Json<TogglQuery>,
+) -> Result<Json<Vec<ReportEntry>>> {
+    let report = toggl_client.full_report(&query).await?;
+    debug!("Returned results: {:?}", report);
+
+    // We don't perform any deletes on report-fetched entries
+    cache_report(&db, &report, None).await?;
+
+    Ok(Json(report))
+}
+
+#[instrument(skip_all)]
+async fn cache_report(
+    db: &DatabaseConnection,
+    models: &Vec<ReportEntry>,
+    exclusive_on: Option<Condition>,
+) -> Result<()> {
+    let models = models.iter().flat_map(|entry| entry.as_models());
+    let models = models.collect::<Vec<_>>();
+    let ids = models
+        .iter()
+        .map(|entry| entry.toggl_id.clone().unwrap())
+        .collect::<Vec<_>>();
+
+    debug!("Caching report entries: {:?}", models);
+
+    // TODO: Why is this needed?
+    if models.is_empty() {
+        return Ok(());
+    }
+
+    TimeEntry::insert_many(models)
+        .on_conflict(ReportEntry::grafting_conflict_statement())
+        .exec(db)
+        .await?;
+
+    if let Some(exclusive_on) = exclusive_on {
+        TimeEntry::delete_many()
+            .filter(
+                Condition::all()
+                    .add(exclusive_on)
+                    .add(time_entry::Column::TogglId.is_in(ids).not()),
+            )
+            .exec(db)
+            .await?;
+    }
+
+    Ok(())
+}
+
+#[instrument(skip(toggl_client))]
+pub async fn current(
+    Extension(toggl_client): Extension<TogglClient>,
+) -> Result<Json<Option<Current>>> {
+    Ok(toggl_client.get_current().await.map(Json)?)
+}
+
+#[instrument(skip(toggl_client))]
+pub async fn start_time_entry(
+    Extension(toggl_client): Extension<TogglClient>,
+    Json(body): Json<HashMap<String, Value>>,
+) -> Result<impl IntoResponse> {
+    toggl_client.start_time_entry(body).await?;
+    Ok((StatusCode::OK, "Ok"))
+}
+
+#[instrument(skip(db, toggl_client))]
+async fn projects(
+    Extension(db): Extension<DatabaseConnection>,
+    Extension(toggl_client): Extension<TogglClient>,
+) -> Result<Json<Vec<Project>>> {
+    let projects = toggl_client.fetch_projects().await?;
+
+    entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
+        .on_conflict(Project::grafting_conflict_statement())
+        .exec(&db)
+        .await?;
+
+    Ok(Json(projects))
+}
+
+#[instrument(skip(toggl_client, db))]
+async fn clients(
+    Extension(db): Extension<DatabaseConnection>,
+    Extension(toggl_client): Extension<TogglClient>,
+) -> Result<Json<Vec<ProjectClient>>> {
+    let clients = toggl_client.fetch_clients().await?;
+
+    entity::client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
+        .on_conflict(ProjectClient::grafting_conflict_statement())
+        .exec(&db)
+        .await?;
+
+    Ok(Json(clients))
+}
+
+async fn health(Extension(toggl_client): Extension<TogglClient>) -> Result<&'static str> {
+    return if toggl_client.check_health().await {
+        Ok("Ok")
+    } else {
+        Err(anyhow!("Panopto health check failed").into())
+    };
+}
+
+#[derive(Debug, Clone, Deserialize)]
+struct RefreshQuery {
+    start_date: Option<String>,
+}
+
+#[instrument(skip(toggl_client, db))]
+async fn refresh(
+    Extension(toggl_client): Extension<TogglClient>,
+    Extension(db): Extension<DatabaseConnection>,
+    Query(RefreshQuery { start_date }): Query<RefreshQuery>,
+) -> Result<&'static str> {
+    let end_date = chrono::Utc::now();
+    let end_date_query_string = end_date.date_naive().format("%Y-%m-%d").to_string();
+    let start_date_query_string = start_date.unwrap_or(end_date_query_string.clone());
+    let start_date = NaiveDate::parse_from_str(&start_date_query_string, "%Y-%m-%d")?;
+
+    let query = TogglQuery {
+        start_date: Some(start_date_query_string),
+        end_date: Some(end_date_query_string),
+        ..Default::default()
+    };
+
+    let report = toggl_client.full_report(&query).await?;
+    let exclusivity_condition = day_exclusivity_condition(start_date, end_date.date_naive());
+    cache_report(&db, &report, Some(exclusivity_condition)).await?;
+
+    Ok("Ok")
+}
+
+fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
+    time_entry::Column::Start
+        .between(
+            start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
+            end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
+        )
+        .into_condition()
+}
+
+fn from_csv_row(row: csv::StringRecord) -> ActiveModel {
+    unimplemented!("Need to refactor db first")
+}
+
+async fn import_csv(
+    Extension(db): Extension<DatabaseConnection>,
+    mut multipart: Multipart,
+) -> Result<impl IntoResponse> {
+    return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
+
+    // while let Some(field) = multipart.next_field().await? {
+    //     // if let Some("csv") = field.name() {
+    //     //     let csv = field.bytes().await?;
+    //     //     let mut csv = csv::Reader::from_reader(csv.as_ref());
+    //     //     let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
+    //     //
+    //     //     time_entry::Entity::insert_many(data.collect::<Result<_>>().unwrap())
+    //     //         .on_conflict(ReportEntry::grafting_conflict_statement())
+    //     //         .exec(&db)
+    //     //         .await
+    //     //         .unwrap()
+    //     //     }
+    // }
+}
+
 #[tokio::main]
 async fn main() -> Result<()> {
     // install global collector configured based on RUST_LOG env var.
     tracing_subscriber::fmt::init();
     let config = Config::parse();
-    let toggl_client =
-        TogglApiClient::new(&config.workspace_id.to_string(), &config.toggl_api_token);
+    let toggl_client = TogglClient::new(
+        &config.workspace_id.to_string(),
+        &STANDARD.encode(&format!("{}:api_token", config.toggl_api_token)),
+    );

     let db = sea_orm::Database::connect(config.database_url)
         .await
@ -58,14 +246,14 @@ async fn main() -> Result<()> {
// build our application with a route // build our application with a route
let app = Router::new() let app = Router::new()
.route("/import_csv", post(routes::import_csv)) .route("/import_csv", post(import_csv))
.route("/health", get(routes::health)) .route("/health", get(health))
.route("/current", get(routes::current)) .route("/current", get(current))
.route("/refresh", post(routes::refresh)) .route("/refresh", post(refresh))
.route("/report", post(routes::report)) .route("/report", post(report))
.route("/projects", get(routes::projects)) .route("/projects", get(projects))
.route("/clients", get(routes::clients)) .route("/clients", get(clients))
.route("/start_time_entry", post(routes::start_time_entry)) .route("/start_time_entry", post(start_time_entry))
.layer(Extension(toggl_client)) .layer(Extension(toggl_client))
.layer(Extension(db)) .layer(Extension(db))
.layer(TraceLayer::new_for_http()); .layer(TraceLayer::new_for_http());
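One step in cache_report above is worth isolating: when a window condition is supplied (the /refresh path), cached rows inside that window whose toggl_id is absent from the fresh report are treated as deleted upstream and pruned. The same filter shape in a standalone, hypothetical helper (time_entry is the entity module from this diff):

    use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter};

    // Delete cached entries inside `window` whose toggl_id is not among the
    // ids just fetched; returns how many rows were pruned.
    async fn prune_window(
        db: &DatabaseConnection,
        window: Condition,
        fresh_ids: Vec<i64>,
    ) -> Result<u64, sea_orm::DbErr> {
        let res = time_entry::Entity::delete_many()
            .filter(
                Condition::all()
                    .add(window)
                    .add(time_entry::Column::TogglId.is_in(fresh_ids).not()),
            )
            .exec(db)
            .await?;
        Ok(res.rows_affected)
    }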

src/poll.rs

@@ -1,23 +1,23 @@
-use crate::entity::time_entry;
-use crate::sync_service::{update_database, UpdateStats};
-use crate::toggl_api::TogglApiClient;
-use crate::utils;
-use chrono::{DateTime, FixedOffset};
-use migration::Order;
-use sea_orm::{DatabaseConnection, EntityTrait, QueryOrder, QuerySelect};
-use std::ops::Sub;
+use crate::client::TogglClient;
+use crate::entity::{client, project, time_entry};
+use crate::types::{Project, ProjectClient, TogglQuery};
+use sea_orm::{DatabaseConnection, EntityTrait, QuerySelect};
 use tracing::instrument;
+use crate::{day_exclusivity_condition, utils};

 #[tracing::instrument(skip(client, db))]
-pub async fn poll_job(client: TogglApiClient, db: DatabaseConnection, poll_period: u64) {
-    // Periodically poll the Toggl API for new time entries for today to cache them in the database
+pub async fn poll_job(client: TogglClient, db: DatabaseConnection, poll_period: u64) {
+    // Every 2h, poll the Toggl API for new time entries for today to cache them in the database
     let period = tokio::time::Duration::from_secs(poll_period);

     loop {
         tracing::info!("Polling Toggl API");

         match perform_poll(&client, &db).await {
-            Ok(poll_update_data) => {
-                tracing::info!("Successfully polled Toggl API: {:?}", poll_update_data);
+            Ok(report_entries_count) => {
+                tracing::info!(
+                    "Successfully polled Toggl API: {:?} entries retrieved",
+                    report_entries_count
+                );
             }

             Err(error) => {
@@ -29,29 +29,60 @@ pub async fn poll_job(client: TogglApiClient, db: DatabaseConnection, poll_period: u64) {
         }
     }
 }

-#[instrument(skip(toggl_client, db))]
+#[instrument(skip(client, db))]
 pub async fn perform_poll(
-    toggl_client: &TogglApiClient,
+    client: &TogglClient,
     db: &DatabaseConnection,
-) -> utils::Result<UpdateStats> {
-    let since = time_entry::Entity::find()
+) -> utils::Result<usize> {
+    let now = chrono::Utc::now();
+    let today_string = now
+        .date_naive()
+        .format("%Y-%m-%d")
+        .to_string();
+
+    let report = client
+        .full_report(&TogglQuery {
+            start_date: Some(today_string.clone()),
+            end_date: Some(today_string.clone()),
+            ..Default::default()
+        })
+        .await?;
+
+    let existing_project_ids = project::Entity::find()
         .select_only()
-        .column(time_entry::Column::ServerUpdatedAt)
-        .order_by(time_entry::Column::ServerUpdatedAt, Order::Desc)
-        .into_tuple::<DateTime<FixedOffset>>()
-        .one(db)
+        .column(project::Column::TogglId)
+        .into_tuple::<i64>()
+        .all(db)
         .await?;

-    let since = since.unwrap_or(
-        chrono::Utc::now()
-            .sub(chrono::Duration::days(1))
-            .fixed_offset(),
-    );
+    let new_projects = report
+        .iter()
+        .filter_map(|entry| entry.project_id)
+        .any(|project_id| !existing_project_ids.contains(&(project_id as i64)));

-    let time_entries = toggl_client
-        .fetch_time_entries_modified_since(since.to_utc())
-        .await?;
+    if new_projects {
+        let clients = client.fetch_clients().await?;

-    // These are changes only so there is no need to enforce exclusivity
-    update_database(db, toggl_client, &time_entries, None).await
+        client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
+            .on_conflict(ProjectClient::grafting_conflict_statement())
+            .exec(db)
+            .await?;
+
+        let projects = client.fetch_projects().await?;
+        project::Entity::insert_many(projects.iter().map(Project::as_model))
+            .on_conflict(Project::grafting_conflict_statement())
+            .exec(db)
+            .await?;
+    }
+
+    crate::cache_report(
+        &db,
+        &report,
+        Some(
+            day_exclusivity_condition(now.date_naive(), now.date_naive()),
+        ),
+    )
+    .await?;
+
+    Ok(report.len())
 }
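A side note on poll_job above: if the loop sleeps a fixed period after each attempt, the schedule drifts by however long the poll itself takes. If a fixed cadence ever matters, tokio's interval holds the schedule instead; a sketch, not this repository's code:

    use std::time::Duration;

    async fn poll_on_interval(period_secs: u64) {
        let mut ticker = tokio::time::interval(Duration::from_secs(period_secs));
        loop {
            ticker.tick().await; // the first tick completes immediately
            // perform_poll would run here
            println!("polling Toggl API");
        }
    }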

src/routes.rs

@@ -1,149 +0,0 @@
use crate::sync_service::UpdateStats;
use crate::toggl_api::types::{self, Client, Project, ReportRow, TogglReportQuery};
use crate::toggl_api::TogglApiClient;
use crate::{entity, sync_service, utils};
use anyhow::anyhow;
use axum::extract::{Multipart, Query};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::{Extension, Json};
use chrono::{DateTime, Utc};
use sea_orm::{DatabaseConnection, EntityTrait};
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
use tracing::instrument;
#[instrument(skip(db, toggl_client))]
pub async fn report(
Extension(toggl_client): Extension<TogglApiClient>,
Extension(db): Extension<DatabaseConnection>,
Json(query): Json<TogglReportQuery>,
) -> utils::Result<Json<Vec<ReportRow>>> {
let report = toggl_client.full_report(&query).await?;
let time_entries = report
.iter()
.flat_map(|entry| entry.to_time_entries(toggl_client.workspace_id()))
.collect::<Vec<_>>();
sync_service::update_database(&db, &toggl_client, &time_entries, None).await?;
Ok(Json(report))
}
#[instrument(skip(toggl_client))]
pub async fn current(
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<Json<Option<types::TimeEntry>>> {
toggl_client.fetch_current_time_entry().await.map(Json)
}
#[instrument(skip(toggl_client))]
pub async fn start_time_entry(
Extension(toggl_client): Extension<TogglApiClient>,
Json(body): Json<HashMap<String, Value>>,
) -> utils::Result<impl IntoResponse> {
toggl_client.start_time_entry(body).await?;
Ok((StatusCode::OK, "Ok"))
}
#[instrument(skip(db, toggl_client))]
pub async fn projects(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<Json<Vec<Project>>> {
let projects = toggl_client.fetch_projects().await?;
entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(projects))
}
#[instrument(skip(toggl_client, db))]
pub async fn clients(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<Json<Vec<Client>>> {
let clients = toggl_client.fetch_clients().await?;
entity::client::Entity::insert_many(clients.iter().map(Client::as_model))
.on_conflict(Client::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(clients))
}
pub async fn health(
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<&'static str> {
if toggl_client.check_health().await {
Ok("Ok")
} else {
Err(anyhow!("Toggl health check failed").into())
}
}
pub async fn import_csv(
Extension(db): Extension<DatabaseConnection>,
mut multipart: Multipart,
) -> utils::Result<impl IntoResponse> {
return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
// while let Some(field) = multipart.next_field().await? {
// // if let Some("csv") = field.name() {
// // let csv = field.bytes().await?;
// // let mut csv = csv::Reader::from_reader(csv.as_ref());
// // let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
// //
// // time_entry::Entity::insert_many(data.collect::<Result<_>>().unwrap())
// // .on_conflict(ReportEntry::grafting_conflict_statement())
// // .exec(&db)
// // .await
// // .unwrap()
// // }
// }
}
#[derive(Debug, Clone, Deserialize)]
pub struct RefreshQuery {
start_date: Option<DateTime<Utc>>,
end_date: Option<DateTime<Utc>>,
}
#[instrument(skip(toggl_client, db))]
pub async fn refresh(
Extension(toggl_client): Extension<TogglApiClient>,
Extension(db): Extension<DatabaseConnection>,
Query(RefreshQuery {
start_date,
end_date,
}): Query<RefreshQuery>,
) -> utils::Result<Json<UpdateStats>> {
let time_entries = match (start_date, end_date) {
(Some(start_date), Some(end_date)) => {
toggl_client
.fetch_time_entries_in_range(start_date, end_date)
.await?
}
(Some(start_date), None) => {
let end_date = Utc::now();
toggl_client
.fetch_time_entries_in_range(start_date, end_date)
.await?
}
(None, Some(_)) => {
return Err(anyhow!("start_date must be provided if end_date is provided").into());
}
_ => toggl_client.fetch_recent_time_entries().await?,
};
sync_service::update_database(&db, &toggl_client, &time_entries, None)
.await
.map(Json)
}

src/sync_service.rs

@@ -1,118 +0,0 @@
use crate::entity::time_entry::Entity as TimeEntry;
use crate::entity::{client, project, time_entry};
use crate::toggl_api::types::{Client, Project, TimeEntry as ToggleApiTimeEntry};
use crate::toggl_api::TogglApiClient;
use crate::utils;
use migration::Condition;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, QuerySelect};
use serde::Serialize;
#[derive(Debug, Serialize)]
pub struct UpdateStats {
retrieved: UpdateStatsInner,
written: UpdateStatsInner,
}
#[derive(Debug, Serialize)]
pub struct UpdateStatsInner {
updated: usize,
deleted: usize,
}
pub async fn update_database(
db: &DatabaseConnection,
toggl_client: &TogglApiClient,
time_entries: &[ToggleApiTimeEntry],
exclusive_on: Option<Condition>,
) -> utils::Result<UpdateStats> {
let (deleted_entries, time_entries) = time_entries
.iter()
.partition::<Vec<_>, _>(|entry| entry.server_deleted_at.is_some());
let retrieved = UpdateStatsInner {
updated: time_entries.len(),
deleted: deleted_entries.len(),
};
let mut written = UpdateStatsInner {
updated: 0,
deleted: 0,
};
let deleted_ids = deleted_entries
.iter()
.map(|entry| entry.id)
.collect::<Vec<_>>();
if !deleted_ids.is_empty() {
let delete_result = TimeEntry::delete_many()
.filter(time_entry::Column::TogglId.is_in(deleted_ids))
.exec(db)
.await?;
written.deleted = delete_result.rows_affected as usize;
}
let existing_project_ids = project::Entity::find()
.select_only()
.column(project::Column::TogglId)
.into_tuple::<i64>()
.all(db)
.await?;
let new_projects = time_entries
.iter()
.filter_map(|entry| entry.project_id)
.any(|project_id| !existing_project_ids.contains(&project_id));
if new_projects {
let clients = toggl_client.fetch_clients().await?;
client::Entity::insert_many(clients.iter().map(Client::as_model))
.on_conflict(Client::grafting_conflict_statement())
.exec(db)
.await?;
let projects = toggl_client.fetch_projects().await?;
project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(db)
.await?;
}
let ids = time_entries
.iter()
.map(|entry| entry.id)
.collect::<Vec<_>>();
let models = time_entries
.into_iter()
.map(|entry| entry.as_model())
.collect::<Vec<_>>();
// TODO: Why is this needed?
if models.is_empty() {
return Ok(UpdateStats { retrieved, written });
}
let insert_result = TimeEntry::insert_many(models)
.on_conflict(ToggleApiTimeEntry::grafting_conflict_statement())
.exec_without_returning(db)
.await?;
written.updated = insert_result as usize;
if let Some(exclusive_on) = exclusive_on {
TimeEntry::delete_many()
.filter(
Condition::all()
.add(exclusive_on)
.add(time_entry::Column::TogglId.is_in(ids).not()),
)
.exec(db)
.await?;
}
Ok(UpdateStats { retrieved, written })
}

src/toggl_api/api_client.rs

@@ -1,229 +0,0 @@
use crate::toggl_api::types::{
Client as ProjectClient, Project, ReportRow, TimeEntry, TogglReportQuery,
};
use anyhow::anyhow;
use axum::http::StatusCode;
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use chrono::{DateTime, Utc};
use hyper::HeaderMap;
use reqwest::header::HeaderValue;
use reqwest::{Client, RequestBuilder, Response};
use serde_json::Value;
use std::collections::HashMap;
use std::time::Duration;
use tracing::instrument;
use tracing::log::debug;
#[derive(Debug, Clone)]
pub struct TogglApiClient {
client: Client,
workspace_id: String,
base_url: String,
reports_base_url: String,
}
impl TogglApiClient {
async fn make_request(&self, request_builder: RequestBuilder) -> crate::Result<Response> {
loop {
let builder = request_builder
.try_clone()
.ok_or(anyhow!("Failed to clone request builder"))?;
let response = self.client.execute(builder.build()?).await?;
// If we are rate limited, wait a bit and try again
if response.status() == StatusCode::TOO_MANY_REQUESTS {
tokio::time::sleep(Duration::from_secs(5)).await;
} else {
return Ok(response);
}
}
}
pub fn workspace_id(&self) -> i64 {
self.workspace_id.parse().unwrap()
}
pub async fn check_health(&self) -> bool {
true
}
pub fn new(workspace_id: &str, api_token: &str) -> Self {
let toggl_auth = &STANDARD.encode(format!("{}:api_token", api_token));
let client = Client::builder()
.default_headers(Self::default_headers(toggl_auth))
.build()
.expect("Failed to build reqwest client");
Self {
client,
workspace_id: workspace_id.to_string(),
base_url: "https://api.track.toggl.com/api/v9".to_string(),
reports_base_url: "https://api.track.toggl.com/reports/api/v3".to_string(),
}
}
fn default_headers(toggl_auth: &str) -> HeaderMap {
let mut headers = HeaderMap::new();
let mut value = HeaderValue::from_str(&format!("Basic {}", toggl_auth)).unwrap();
value.set_sensitive(true);
headers.insert("Authorization", value);
headers
}
pub async fn fetch_projects(&self) -> crate::Result<Vec<Project>> {
let url = format!(
"{base_url}/workspaces/{}/projects",
self.workspace_id,
base_url = self.base_url,
);
let projects = self
.make_request(self.client.get(&url))
.await?
.json::<Vec<Project>>()
.await?;
Ok(projects)
}
pub async fn fetch_clients(&self) -> crate::Result<Vec<ProjectClient>> {
let url = format!(
"{base_url}/workspaces/{}/clients",
self.workspace_id,
base_url = self.base_url,
);
let clients = self
.make_request(self.client.get(&url))
.await?
.json::<Vec<ProjectClient>>()
.await?;
Ok(clients)
}
pub async fn fetch_recent_time_entries(&self) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(self
.make_request(self.client.get(url))
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_time_entries_modified_since(
&self,
date_time: DateTime<Utc>,
) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(self
.make_request(
self.client
.get(url)
.query(&[("since", date_time.timestamp())]),
)
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_time_entries_in_range(
&self,
start: DateTime<Utc>,
end: DateTime<Utc>,
) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(self
.make_request(self.client.get(url).query(&[
("start_date", start.to_rfc3339()),
("end_date", end.to_rfc3339()),
]))
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_current_time_entry(&self) -> crate::Result<Option<TimeEntry>> {
let url = format!(
"{base_url}/me/time_entries/current",
base_url = self.base_url
);
let res = self
.make_request(self.client.get(url))
.await?
.json::<Option<TimeEntry>>()
.await?;
Ok(res)
}
pub async fn start_time_entry(&self, mut body: HashMap<String, Value>) -> crate::Result<()> {
let url = format!(
"{base_url}/workspaces/{workspace_id}/time_entries",
base_url = self.base_url,
workspace_id = self.workspace_id
);
body.insert(
"workspace_id".to_string(),
self.workspace_id.parse::<i32>()?.into(),
);
self.make_request(self.client.post(url).json(&body)).await?;
Ok(())
}
/////////////
// Reports //
/////////////
fn paginate_filters(original_filters: &TogglReportQuery, last_row_id: u64) -> TogglReportQuery {
let mut filters: TogglReportQuery = original_filters.clone();
filters.first_row_number = Some(last_row_id + 1);
filters
}
#[instrument(skip(self, filters))]
pub async fn full_report(&self, filters: &TogglReportQuery) -> crate::Result<Vec<ReportRow>> {
let url = format!(
"{base_url}/workspace/{workspace_id}/search/time_entries",
base_url = self.reports_base_url,
workspace_id = self.workspace_id
);
let mut last_row_number = Some(0);
let mut results = vec![];
while let Some(last_row_number_n) = last_row_number {
debug!("Fetching page starting with {}", last_row_number_n);
// If we are not on the first page, wait a bit to avoid rate limiting
if last_row_number_n != 0 {
tokio::time::sleep(Duration::from_secs(1)).await;
}
// TODO: Implement rate limiting
let response = self
.make_request(
self.client
.post(&url)
.json(&Self::paginate_filters(filters, last_row_number_n)),
)
.await?;
let data = response.json::<Vec<ReportRow>>().await?;
last_row_number = data.last().map(|e| e.row_number as u64);
data.into_iter().for_each(|e| results.push(e));
}
Ok(results)
}
}
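make_request above retries 429 responses forever with a flat five-second sleep. A capped variant with a growing backoff, sketched against the same reqwest types (the helper and its limits are illustrative, not the project's code):

    use std::time::Duration;
    use reqwest::{Client, RequestBuilder, Response, StatusCode};

    // Retry a cloneable request on 429, giving up after `max_attempts`.
    async fn send_with_retry(
        client: &Client,
        request: RequestBuilder,
        max_attempts: u32,
    ) -> anyhow::Result<Response> {
        for attempt in 1..=max_attempts {
            let builder = request
                .try_clone()
                .ok_or_else(|| anyhow::anyhow!("request body is not cloneable"))?;
            let response = client.execute(builder.build()?).await?;
            if response.status() != StatusCode::TOO_MANY_REQUESTS {
                return Ok(response);
            }
            // back off a little longer each time we are rate limited
            tokio::time::sleep(Duration::from_secs(5 * attempt as u64)).await;
        }
        anyhow::bail!("still rate limited after {max_attempts} attempts")
    }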

src/toggl_api/mod.rs

@@ -1,4 +0,0 @@
pub mod api_client;
pub mod types;
pub use api_client::TogglApiClient;

src/toggl_api/types.rs → src/types.rs

@@ -1,12 +1,21 @@
-use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 use serde_with::skip_serializing_none;
 use std::collections::HashMap;
 use std::option::Option;
+use chrono::{DateTime, Utc};

 #[derive(Clone, Serialize, Deserialize, Debug)]
-pub struct ReportRow {
+pub struct TimeEntry {
+    pub id: u64,
+    pub seconds: u32,
+    pub start: String,
+    pub stop: String,
+    pub at: String,
+}
+
+#[derive(Clone, Serialize, Deserialize, Debug)]
+pub struct ReportEntry {
     pub user_id: u32,
     pub username: String,
     pub project_id: Option<u64>,
@@ -17,40 +26,23 @@ pub struct ReportRow {
     pub billable_amount_in_cents: Option<u64>,
     pub hourly_rate_in_cents: Option<u64>,
     pub currency: String,
-    pub time_entries: Vec<ReportRowInnerTimeEntry>,
+    pub time_entries: Vec<TimeEntry>,
     pub row_number: u32,
 }

-#[derive(Clone, Serialize, Deserialize, Debug)]
-pub struct ReportRowInnerTimeEntry {
-    pub id: u64,
-    pub seconds: u32,
-    pub start: DateTime<Utc>,
-    pub stop: DateTime<Utc>,
-    pub at: DateTime<Utc>,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct TimeEntry {
-    pub id: i64,
-    pub workspace_id: i64,
-    pub project_id: Option<i64>,
-    pub task_id: Option<i64>,
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Current {
+    pub id: u64,
+    pub workspace_id: u64,
+    pub project_id: Option<u64>,
+    pub task_id: Option<u64>,
     pub billable: bool,
-    pub start: DateTime<Utc>,
-    pub stop: Option<DateTime<Utc>>,
+    pub start: String,
+    pub stop: Option<String>,
     pub duration: i64,
     pub description: String,
     pub tags: Vec<String>,
-    pub tag_ids: Vec<i64>,
-    pub at: DateTime<Utc>,
-    pub server_deleted_at: Option<DateTime<Utc>>,
-    pub user_id: i64,
-
-    // Ignored fields
-    // duronly: bool,
-    // uid: i64,
-    // wid: i64,
-    // pid: Option<i64>,
+    pub tag_ids: Vec<u64>,
 }
@@ -60,34 +52,14 @@ pub struct Project {
     pub client_id: Option<u64>,
     pub name: String,
     pub active: bool,
-    pub color: String,
-    pub at: DateTime<Utc>,
-    pub server_deleted_at: Option<DateTime<Utc>>,
-    pub created_at: DateTime<Utc>,
-    // cid: Option<serde_json::Value>,
-    // wid: i64,
-    // rate: Option<serde_json::Value>,
-    // status: String,
-    // billable: Option<serde_json::Value>,
-    // currency: Option<serde_json::Value>,
-    // template: Option<serde_json::Value>,
-    // fixed_fee: Option<serde_json::Value>,
-    // recurring: bool,
-    // is_private: bool,
-    // start_date: String,
-    // template_id: Option<serde_json::Value>,
-    // actual_hours: i64,
-    // actual_seconds: i64,
-    // auto_estimates: Option<serde_json::Value>,
-    // estimated_hours: Option<serde_json::Value>,
-    // estimated_seconds: Option<serde_json::Value>,
-    // rate_last_updated: Option<serde_json::Value>,
-    // recurring_parameters: Option<serde_json::Value>,
+
+    #[serde(flatten)]
+    pub rest: HashMap<String, Value>,
 }

 /// Represents a client in Toggl.
 #[derive(Debug, Serialize, Deserialize)]
-pub struct Client {
+pub struct ProjectClient {
     /// Indicates whether the client is archived or not.
     pub archived: bool,
@@ -107,9 +79,10 @@ pub struct Client {
     pub wid: i32,
 }

+#[allow(non_snake_case)]
 #[skip_serializing_none]
 #[derive(Serialize, Deserialize, Clone, Default)]
-pub struct TogglReportQuery {
+pub struct TogglQuery {
     pub billable: Option<bool>,
     pub client_ids: Option<Vec<u64>>,
     pub description: Option<String>,
@@ -124,13 +97,11 @@ pub struct TogglReportQuery {
     pub min_duration_seconds: Option<u64>,
     pub order_by: Option<String>,
     pub order_dir: Option<String>,
-    #[serde(rename = "postedFields")]
-    pub posted_fields: Option<Vec<String>>,
+    pub postedFields: Option<Vec<String>>,
     pub project_ids: Option<Vec<u64>>,
     pub rounding: Option<u64>,
     pub rounding_minutes: Option<u64>,
-    #[serde(rename = "startTime")]
-    pub start_time: Option<String>,
+    pub startTime: Option<String>,
     pub start_date: Option<String>,
     pub tag_ids: Option<Vec<u64>>,
     pub task_ids: Option<Vec<u64>>,
@@ -143,7 +114,7 @@ pub struct TogglReportQuery {
 use std::fmt;

-impl fmt::Debug for TogglReportQuery {
+impl fmt::Debug for TogglQuery {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let mut ds = f.debug_struct("TogglQuery");
@@ -189,8 +160,8 @@ impl fmt::Debug for TogglReportQuery {
         if let Some(order_dir) = &self.order_dir {
             ds.field("order_dir", order_dir);
         }
-        if let Some(posted_fields) = &self.posted_fields {
-            ds.field("postedFields", posted_fields);
+        if let Some(postedFields) = &self.postedFields {
+            ds.field("postedFields", postedFields);
         }
         if let Some(project_ids) = &self.project_ids {
             ds.field("project_ids", project_ids);
@@ -201,8 +172,8 @@ impl fmt::Debug for TogglReportQuery {
         if let Some(rounding_minutes) = &self.rounding_minutes {
             ds.field("rounding_minutes", rounding_minutes);
         }
-        if let Some(start_time) = &self.start_time {
-            ds.field("startTime", start_time);
+        if let Some(startTime) = &self.startTime {
+            ds.field("startTime", startTime);
         }
         if let Some(start_date) = &self.start_date {
             ds.field("start_date", start_date);

src/utils.rs

@@ -1,9 +1,5 @@
-use crate::entity::time_entry;
 use axum::http::StatusCode;
 use axum::response::IntoResponse;
-use chrono::{NaiveDate, NaiveTime};
-use migration::{Condition, IntoCondition};
-use sea_orm::ColumnTrait;
 use tokio::signal;

 #[derive(Debug)]
@@ -56,12 +52,3 @@ pub async fn shutdown_signal() {
         _ = terminate => {},
     }
 }
-
-pub fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
-    time_entry::Column::Start
-        .between(
-            start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
-            end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
-        )
-        .into_condition()
-}