Compare commits

...

10 Commits

11 changed files with 324 additions and 97 deletions

View File

@ -5,7 +5,7 @@ edition = "2021"
[dependencies]
tokio = { version = "1.0", features = ["full"] }
axum = "0.6.20"
axum = { version = "0.6.20", features = ["multipart"] }
clap = { version = "4.4.3", features = ["derive", "env"] }
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.106"
@ -31,4 +31,5 @@ migration = { path = "./migration" }
chrono = { version = "0.4.31", features = ["serde"] }
futures = "0.3.29"
rucron = "0.1.5"
csv = "1.3.0"
#tokio-cron-scheduler = "0.9.4"

View File

@ -8,7 +8,7 @@ migrate:
sea-orm-cli migrate up
generate-entity:
sea-orm-cli generate entity --with-serde both -o src/entity
sea-orm-cli generate entity --with-serde both -o src/entity --ignore-tables toggl_portal_seaql_migrations
release:
cargo build --release

View File

@ -29,13 +29,11 @@ impl MigrationTrait for Migration {
#[derive(DeriveIden)]
enum TimeEntry {
Table,
Id,
ProjectId,
}
#[derive(DeriveIden)]
enum Project {
Table,
Id,
TogglId,
}

View File

@ -2,7 +2,6 @@ use reqwest::Client;
use serde_json::Value;
use std::collections::HashMap;
use std::time::Duration;
use axum::http::StatusCode;
use hyper::HeaderMap;
use tracing::instrument;
use tracing::log::debug;

View File

@ -1,18 +1,34 @@
use crate::entity::{client, project, time_entry};
use crate::types::{Project, ProjectClient, ReportEntry, TimeEntry};
use sea_orm::sea_query::OnConflict;
use sea_orm::{NotSet, Set};
use crate::entity::{time_entry, client, project};
use crate::types::{Project, ProjectClient, ReportEntry};
impl ReportEntry {
pub(crate) fn as_models(&self) -> Vec<time_entry::ActiveModel> {
self.time_entries.iter().map(|inner| time_entry::ActiveModel {
id: NotSet,
toggl_id: Set(inner.id as i64),
description: Set(self.description.clone()),
project_id: Set(self.project_id.map(|id| id as i64)),
start: Set(chrono::DateTime::parse_from_rfc3339(&inner.start).unwrap()),
stop: Set(chrono::DateTime::parse_from_rfc3339(&inner.start).unwrap()),
raw_json: Set(serde_json::to_value(inner).unwrap()),
}).collect()
self.time_entries
.iter()
.map(|inner| time_entry::ActiveModel {
id: NotSet,
toggl_id: Set(inner.id as i64),
description: Set(self.description.clone()),
project_id: Set(self.project_id.map(|id| id as i64)),
start: Set(chrono::DateTime::parse_from_rfc3339(&inner.start).unwrap()),
stop: Set(chrono::DateTime::parse_from_rfc3339(&inner.stop).unwrap()),
raw_json: Set(serde_json::to_value(inner).unwrap()),
})
.collect()
}
/// Upsert clause for time entries: when an inserted row collides on
/// `toggl_id`, overwrite the mutable columns with the incoming values.
pub fn grafting_conflict_statement() -> OnConflict {
    OnConflict::column(time_entry::Column::TogglId)
        .update_column(time_entry::Column::Description)
        .update_column(time_entry::Column::ProjectId)
        .update_column(time_entry::Column::Start)
        .update_column(time_entry::Column::Stop)
        .update_column(time_entry::Column::RawJson)
        .to_owned()
}
}
@ -27,6 +43,18 @@ impl ProjectClient {
server_deleted_at: Set(self.server_deleted_at.clone().map(|dt| dt.fixed_offset())),
}
}
/// Upsert clause for clients: when an inserted row collides on `id`,
/// overwrite the mutable columns with the incoming values.
pub fn grafting_conflict_statement() -> OnConflict {
    OnConflict::column(client::Column::Id)
        .update_column(client::Column::Name)
        .update_column(client::Column::Archived)
        .update_column(client::Column::WorkspaceId)
        .update_column(client::Column::At)
        .update_column(client::Column::ServerDeletedAt)
        .to_owned()
}
}
impl Project {
@ -41,4 +69,16 @@ impl Project {
raw_json: Set(serde_json::to_value(self).unwrap()),
}
}
/// Upsert clause for projects: when an inserted row collides on
/// `toggl_id`, overwrite the mutable columns with the incoming values.
pub fn grafting_conflict_statement() -> OnConflict {
    OnConflict::column(project::Column::TogglId)
        .update_column(project::Column::Name)
        .update_column(project::Column::Active)
        .update_column(project::Column::ClientId)
        .update_column(project::Column::WorkspaceId)
        .update_column(project::Column::RawJson)
        .to_owned()
}
}

View File

@ -5,4 +5,3 @@ pub mod prelude;
pub mod client;
pub mod project;
pub mod time_entry;
pub mod toggl_portal_seaql_migrations;

View File

@ -3,4 +3,3 @@
pub use super::client::Entity as Client;
pub use super::project::Entity as Project;
pub use super::time_entry::Entity as TimeEntry;
pub use super::toggl_portal_seaql_migrations::Entity as TogglPortalSeaqlMigrations;

View File

@ -1,17 +0,0 @@
//! `SeaORM` Entity. Generated by sea-orm-codegen 0.12.2
use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)]
#[sea_orm(table_name = "toggl_portal_seaql_migrations")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub version: String,
pub applied_at: i64,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}
impl ActiveModelBehavior for ActiveModel {}

View File

@ -1,7 +1,11 @@
use crate::client::TogglClient;
use crate::entity::time_entry::{self, Entity as TimeEntry};
use crate::entity::prelude::TimeEntry;
use crate::entity::time_entry;
use crate::entity::time_entry::ActiveModel;
use crate::types::{Current, Project, ProjectClient, ReportEntry, TogglQuery};
use anyhow::anyhow;
use axum::extract::multipart::Field;
use axum::extract::{Multipart, Query};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::routing::{get, post};
@ -9,14 +13,17 @@ use axum::{Extension, Json, Router};
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use beachhead::{shutdown_signal, Result};
use chrono::{NaiveDate, NaiveTime};
use clap::Parser;
use migration::{Migrator, MigratorTrait};
use sea_orm::sea_query::OnConflict;
use sea_orm::{DatabaseConnection, EntityTrait};
use sea_orm::sea_query::IntoCondition;
use sea_orm::{ColumnTrait, Condition, DatabaseConnection, EntityTrait, QueryFilter};
use serde::Deserialize;
use serde_json::Value;
use std::collections::HashMap;
use std::net::SocketAddr;
use tower_http::trace::TraceLayer;
use tracing::{debug, instrument};
mod client;
mod db;
@ -37,45 +44,73 @@ struct Config {
#[arg(long = "db", short, env)]
database_url: String,
/// How often, in seconds, to poll the Toggl API for new time entries
#[arg(long, short, env, default_value = "7200")]
poll_period: u64,
}
#[instrument(skip(db, toggl_client))]
pub async fn report(
Extension(toggl_client): Extension<TogglClient>,
Extension(db): Extension<DatabaseConnection>,
Json(query): Json<TogglQuery>,
) -> Result<Json<Vec<ReportEntry>>> {
let report = toggl_client.full_report(&query).await?;
cache_report(&db, &report).await?;
debug!("Returned results: {:?}", report);
// We don't perform any deletes on report-fetched entries
cache_report(&db, &report, None).await?;
Ok(Json(report))
}
async fn cache_report(db: &DatabaseConnection, models: &Vec<ReportEntry>) -> Result<()> {
#[instrument(skip_all)]
async fn cache_report(
db: &DatabaseConnection,
models: &Vec<ReportEntry>,
exclusive_on: Option<Condition>,
) -> Result<()> {
let models = models.iter().flat_map(|entry| entry.as_models());
let models = models.collect::<Vec<_>>();
let ids = models
.iter()
.map(|entry| entry.toggl_id.clone().unwrap())
.collect::<Vec<_>>();
debug!("Caching report entries: {:?}", models);
// SeaORM's `insert_many` cannot build a valid INSERT statement from an
// empty set of models (it errors at runtime), so bail out early when the
// report produced no entries.
if models.is_empty() {
return Ok(());
}
TimeEntry::insert_many(models)
.on_conflict(
OnConflict::column(time_entry::Column::TogglId)
.update_columns(vec![
time_entry::Column::Description,
time_entry::Column::ProjectId,
time_entry::Column::Start,
time_entry::Column::Stop,
time_entry::Column::RawJson,
])
.to_owned(),
)
.on_conflict(ReportEntry::grafting_conflict_statement())
.exec(db)
.await?;
if let Some(exclusive_on) = exclusive_on {
TimeEntry::delete_many()
.filter(
Condition::all()
.add(exclusive_on)
.add(time_entry::Column::TogglId.is_in(ids).not()),
)
.exec(db)
.await?;
}
Ok(())
}
#[instrument(skip(toggl_client))]
pub async fn current(
Extension(toggl_client): Extension<TogglClient>,
) -> Result<Json<Option<Current>>> {
Ok(toggl_client.get_current().await.map(Json)?)
}
#[instrument(skip(toggl_client))]
pub async fn start_time_entry(
Extension(toggl_client): Extension<TogglClient>,
Json(body): Json<HashMap<String, Value>>,
@ -85,47 +120,31 @@ pub async fn start_time_entry(
Ok((StatusCode::OK, "Ok"))
}
#[instrument(skip(db, toggl_client))]
async fn projects(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglClient>
Extension(toggl_client): Extension<TogglClient>,
) -> Result<Json<Vec<Project>>> {
let projects = toggl_client.fetch_projects().await?;
entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(
OnConflict::column(entity::project::Column::TogglId)
.update_columns(vec![
entity::project::Column::Name,
entity::project::Column::Active,
entity::project::Column::ClientId,
entity::project::Column::WorkspaceId,
entity::project::Column::RawJson,
])
.to_owned(),
)
.exec(&db).await?;
entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(projects))
}
#[instrument(skip(toggl_client, db))]
async fn clients(
Extension(db): Extension<DatabaseConnection>,
Extension(toggl_client): Extension<TogglClient>
Extension(toggl_client): Extension<TogglClient>,
) -> Result<Json<Vec<ProjectClient>>> {
let clients = toggl_client.fetch_clients().await?;
entity::client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
.on_conflict(
OnConflict::column(entity::client::Column::Id)
.update_columns(vec![
entity::client::Column::Name,
entity::client::Column::Archived,
entity::client::Column::At,
entity::client::Column::ServerDeletedAt,
entity::client::Column::WorkspaceId,
])
.to_owned(),
)
.exec(&db).await?;
.on_conflict(ProjectClient::grafting_conflict_statement())
.exec(&db)
.await?;
Ok(Json(clients))
}
@ -138,6 +157,69 @@ async fn health(Extension(toggl_client): Extension<TogglClient>) -> Result<&'sta
};
}
/// Query-string parameters accepted by the `/refresh` endpoint.
#[derive(Debug, Clone, Deserialize)]
struct RefreshQuery {
    // Inclusive start of the refresh window, formatted "%Y-%m-%d".
    // When omitted, `refresh` falls back to today's date.
    start_date: Option<String>,
}
/// POST `/refresh` — re-fetch the Toggl report for `start_date..=today` and
/// resynchronize the cache for that window: fetched entries are upserted, and
/// cached entries inside the window that Toggl no longer reports are pruned
/// (via the exclusivity condition passed to `cache_report`).
///
/// `start_date` is "%Y-%m-%d" and defaults to today when omitted.
#[instrument(skip(toggl_client, db))]
async fn refresh(
    Extension(toggl_client): Extension<TogglClient>,
    Extension(db): Extension<DatabaseConnection>,
    Query(RefreshQuery { start_date }): Query<RefreshQuery>,
) -> Result<&'static str> {
    let end_date = chrono::Utc::now();
    let end_date_query_string = end_date.date_naive().format("%Y-%m-%d").to_string();
    // `unwrap_or_else` defers the clone of the fallback string to the case
    // where the caller did not supply an explicit start date.
    let start_date_query_string = start_date.unwrap_or_else(|| end_date_query_string.clone());
    // Validate the (possibly user-supplied) date up front so a malformed
    // value surfaces as an error here rather than being forwarded to Toggl.
    let start_date = NaiveDate::parse_from_str(&start_date_query_string, "%Y-%m-%d")?;
    let query = TogglQuery {
        start_date: Some(start_date_query_string),
        end_date: Some(end_date_query_string),
        ..Default::default()
    };
    let report = toggl_client.full_report(&query).await?;
    let exclusivity_condition = day_exclusivity_condition(start_date, end_date.date_naive());
    cache_report(&db, &report, Some(exclusivity_condition)).await?;
    Ok("Ok")
}
fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
time_entry::Column::Start
.between(
start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
)
.into_condition()
}
/// Convert one CSV record into a `time_entry::ActiveModel`.
///
/// Placeholder: the column mapping is blocked on a db refactor, so this
/// currently always panics. The parameter is underscored to silence the
/// unused-variable warning until it is implemented.
fn from_csv_row(_row: csv::StringRecord) -> ActiveModel {
    unimplemented!("Need to refactor db first")
}
/// Stub endpoint for importing time entries from an uploaded CSV file.
///
/// Currently always answers 501 Not Implemented. The commented-out sketch
/// below shows the intended flow — read the multipart `csv` field, parse it
/// with the `csv` crate, and upsert rows via the time-entry conflict
/// statement — and is blocked on the same db refactor as `from_csv_row`.
async fn import_csv(
    Extension(db): Extension<DatabaseConnection>,
    mut multipart: Multipart,
) -> beachhead::Result<impl IntoResponse> {
    return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
    // while let Some(field) = multipart.next_field().await? {
    // // if let Some("csv") = field.name() {
    // // let csv = field.bytes().await?;
    // // let mut csv = csv::Reader::from_reader(csv.as_ref());
    // // let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
    // //
    // // time_entry::Entity::insert_many(data.collect::<Result<_>>().unwrap())
    // // .on_conflict(ReportEntry::grafting_conflict_statement())
    // // .exec(&db)
    // // .await
    // // .unwrap()
    // // }
    // }
}
#[tokio::main]
async fn main() -> Result<()> {
// install global collector configured based on RUST_LOG env var.
@ -155,12 +237,18 @@ async fn main() -> Result<()> {
Migrator::up(&db, None).await.expect("Failed to migrate");
tokio::spawn(poll::poll_job(toggl_client.clone(), db.clone()));
tokio::spawn(poll::poll_job(
toggl_client.clone(),
db.clone(),
config.poll_period,
));
// build our application with a route
let app = Router::new()
.route("/import_csv", post(import_csv))
.route("/health", get(health))
.route("/current", get(current))
.route("/refresh", post(refresh))
.route("/report", post(report))
.route("/projects", get(projects))
.route("/clients", get(clients))

View File

@ -1,12 +1,14 @@
use crate::client::TogglClient;
use crate::types::TogglQuery;
use sea_orm::DatabaseConnection;
use crate::entity::{client, project, time_entry};
use crate::types::{Project, ProjectClient, TogglQuery};
use sea_orm::{DatabaseConnection, EntityTrait, QuerySelect};
use tracing::instrument;
use crate::day_exclusivity_condition;
#[tracing::instrument(skip(client, db))]
pub async fn poll_job(client: TogglClient, db: DatabaseConnection) {
pub async fn poll_job(client: TogglClient, db: DatabaseConnection, poll_period: u64) {
// Every `poll_period` seconds, poll the Toggl API for today's time entries to cache them in the database
let period = tokio::time::Duration::from_secs(60 * 60 * 2);
let period = tokio::time::Duration::from_secs(poll_period);
loop {
tracing::info!("Polling Toggl API");
@ -32,24 +34,55 @@ pub async fn perform_poll(
client: &TogglClient,
db: &DatabaseConnection,
) -> beachhead::Result<usize> {
let now = chrono::Utc::now();
let today_string = now
.date_naive()
.format("%Y-%m-%d")
.to_string();
let report = client
.full_report(&TogglQuery {
start_date: Some(
chrono::Utc::now()
.date_naive()
.format("%Y-%m-%d")
.to_string(),
),
end_date: Some(
chrono::Utc::now()
.date_naive()
.format("%Y-%m-%d")
.to_string(),
),
start_date: Some(today_string.clone()),
end_date: Some(today_string.clone()),
..Default::default()
})
.await?;
crate::cache_report(&db, &report).await?;
let existing_project_ids = project::Entity::find()
.select_only()
.column(project::Column::TogglId)
.into_tuple::<i64>()
.all(db)
.await?;
let new_projects = report
.iter()
.filter_map(|entry| entry.project_id)
.any(|project_id| !existing_project_ids.contains(&(project_id as i64)));
if new_projects {
let clients = client.fetch_clients().await?;
client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
.on_conflict(ProjectClient::grafting_conflict_statement())
.exec(db)
.await?;
let projects = client.fetch_projects().await?;
project::Entity::insert_many(projects.iter().map(Project::as_model))
.on_conflict(Project::grafting_conflict_statement())
.exec(db)
.await?;
}
crate::cache_report(
&db,
&report,
Some(
day_exclusivity_condition(now.date_naive(), now.date_naive()),
),
)
.await?;
Ok(report.len())
}

View File

@ -81,7 +81,7 @@ pub struct ProjectClient {
#[allow(non_snake_case)]
#[skip_serializing_none]
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
#[derive(Serialize, Deserialize, Clone, Default)]
pub struct TogglQuery {
pub billable: Option<bool>,
pub client_ids: Option<Vec<u64>>,
@ -111,3 +111,90 @@ pub struct TogglQuery {
#[serde(flatten)]
pub rest: HashMap<String, Value>,
}
use std::fmt;

impl fmt::Debug for TogglQuery {
    /// Debug formatting that prints only the fields which are actually set,
    /// so sparse queries produce short, readable log lines.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut ds = f.debug_struct("TogglQuery");
        // Emit `name: value` for each `Some(..)` field, in declaration order.
        macro_rules! optional_fields {
            ($builder:ident, $($field:ident),* $(,)?) => {
                $(
                    if let Some(value) = &self.$field {
                        $builder.field(stringify!($field), value);
                    }
                )*
            };
        }
        optional_fields!(
            ds,
            billable,
            client_ids,
            description,
            end_date,
            first_id,
            first_row_number,
            first_timestamp,
            group_ids,
            grouped,
            hide_amounts,
            max_duration_seconds,
            min_duration_seconds,
            order_by,
            order_dir,
            postedFields,
            project_ids,
            rounding,
            rounding_minutes,
            startTime,
            start_date,
            tag_ids,
            task_ids,
            time_entry_ids,
            user_ids,
        );
        if !self.rest.is_empty() {
            ds.field("rest", &self.rest);
        }
        ds.finish()
    }
}