From 43399c28b1c5448f9b73dce856b297b4c720c048 Mon Sep 17 00:00:00 2001
From: Joshua Coles
Date: Sat, 2 Mar 2024 09:17:19 +0000
Subject: [PATCH] Separate out routes

---
 src/main.rs   | 189 +++-----------------------------------------------
 src/poll.rs   |   5 +-
 src/routes.rs | 175 ++++++++++++++++++++++++++++++++++++++++++++++
 src/utils.rs  |  13 ++++
 4 files changed, 201 insertions(+), 181 deletions(-)
 create mode 100644 src/routes.rs

diff --git a/src/main.rs b/src/main.rs
index 558a20d..d919d11 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -10,7 +10,7 @@ use axum::http::StatusCode;
 use axum::response::IntoResponse;
 use axum::routing::{get, post};
 use axum::{Extension, Json, Router};
-use utils::{shutdown_signal, Result};
+use utils::{Result, shutdown_signal};
 use chrono::{NaiveDate, NaiveTime};
 use clap::Parser;
 use migration::{Migrator, MigratorTrait};
@@ -29,6 +29,7 @@ mod poll;
 mod utils;
 mod csv_parser;
 mod toggl_api;
+mod routes;
 
 #[derive(Debug, Clone, Parser)]
 struct Config {
@@ -49,176 +50,6 @@ struct Config {
     poll_period: u64,
 }
 
-#[instrument(skip(db, toggl_client))]
-pub async fn report(
-    Extension(toggl_client): Extension<TogglApiClient>,
-    Extension(db): Extension<DatabaseConnection>,
-    Json(query): Json<TogglQuery>,
-) -> Result<Json<Vec<ReportEntry>>> {
-    let report = toggl_client.full_report(&query).await?;
-    debug!("Returned results: {:?}", report);
-
-    // We don't perform any deletes on report-fetched entries
-    cache_report(&db, &report, None).await?;
-
-    Ok(Json(report))
-}
-
-#[instrument(skip_all)]
-async fn cache_report(
-    db: &DatabaseConnection,
-    models: &Vec<ReportEntry>,
-    exclusive_on: Option<Condition>,
-) -> Result<()> {
-    let models = models.iter().flat_map(|entry| entry.as_models());
-    let models = models.collect::<Vec<_>>();
-    let ids = models
-        .iter()
-        .map(|entry| entry.toggl_id.clone().unwrap())
-        .collect::<Vec<_>>();
-    debug!("Caching report entries: {:?}", models);
-
-    // TODO: Why is this needed?
-    if models.is_empty() {
-        return Ok(());
-    }
-
-    TimeEntry::insert_many(models)
-        .on_conflict(ReportEntry::grafting_conflict_statement())
-        .exec(db)
-        .await?;
-
-    if let Some(exclusive_on) = exclusive_on {
-        TimeEntry::delete_many()
-            .filter(
-                Condition::all()
-                    .add(exclusive_on)
-                    .add(time_entry::Column::TogglId.is_in(ids).not()),
-            )
-            .exec(db)
-            .await?;
-    }
-
-    Ok(())
-}
-
-#[instrument(skip(toggl_client))]
-pub async fn current(
-    Extension(toggl_client): Extension<TogglApiClient>,
-) -> Result<Json<Option<Current>>> {
-    Ok(toggl_client.get_current().await.map(Json)?)
-}
-
-#[instrument(skip(toggl_client))]
-pub async fn start_time_entry(
-    Extension(toggl_client): Extension<TogglApiClient>,
-    Json(body): Json<HashMap<String, Value>>,
-) -> Result<impl IntoResponse> {
-    toggl_client.start_time_entry(body).await?;
-
-    Ok((StatusCode::OK, "Ok"))
-}
-
-#[instrument(skip(db, toggl_client))]
-async fn projects(
-    Extension(db): Extension<DatabaseConnection>,
-    Extension(toggl_client): Extension<TogglApiClient>,
-) -> Result<Json<Vec<Project>>> {
-    let projects = toggl_client.fetch_projects().await?;
-
-    entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
-        .on_conflict(Project::grafting_conflict_statement())
-        .exec(&db)
-        .await?;
-
-    Ok(Json(projects))
-}
-
-#[instrument(skip(toggl_client, db))]
-async fn clients(
-    Extension(db): Extension<DatabaseConnection>,
-    Extension(toggl_client): Extension<TogglApiClient>,
-) -> Result<Json<Vec<ProjectClient>>> {
-    let clients = toggl_client.fetch_clients().await?;
-    entity::client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
-        .on_conflict(ProjectClient::grafting_conflict_statement())
-        .exec(&db)
-        .await?;
-
-    Ok(Json(clients))
-}
-
-async fn health(Extension(toggl_client): Extension<TogglApiClient>) -> Result<&'static str> {
-    return if toggl_client.check_health().await {
-        Ok("Ok")
-    } else {
-        Err(anyhow!("Panopto health check failed").into())
-    };
-}
-
-#[derive(Debug, Clone, Deserialize)]
-struct RefreshQuery {
-    start_date: Option<String>,
-}
-
-#[instrument(skip(toggl_client, db))]
-async fn refresh(
-    Extension(toggl_client): Extension<TogglApiClient>,
-    Extension(db): Extension<DatabaseConnection>,
-    Query(RefreshQuery { start_date }): Query<RefreshQuery>,
-) -> Result<&'static str> {
-    let end_date = chrono::Utc::now();
-    let end_date_query_string = end_date.date_naive().format("%Y-%m-%d").to_string();
-    let start_date_query_string = start_date.unwrap_or(end_date_query_string.clone());
-    let start_date = NaiveDate::parse_from_str(&start_date_query_string, "%Y-%m-%d")?;
-
-    let query = TogglQuery {
-        start_date: Some(start_date_query_string),
-        end_date: Some(end_date_query_string),
-        ..Default::default()
-    };
-
-    let report = toggl_client.full_report(&query).await?;
-    let exclusivity_condition = day_exclusivity_condition(start_date, end_date.date_naive());
-    cache_report(&db, &report, Some(exclusivity_condition)).await?;
-
-    Ok("Ok")
-}
-
-fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
-    time_entry::Column::Start
-        .between(
-            start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
-            end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
-        )
-        .into_condition()
-}
-
-fn from_csv_row(row: csv::StringRecord) -> ActiveModel {
-    unimplemented!("Need to refactor db first")
-}
-
-async fn import_csv(
-    Extension(db): Extension<DatabaseConnection>,
-    mut multipart: Multipart,
-) -> Result<impl IntoResponse> {
-    return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
-
-    // while let Some(field) = multipart.next_field().await? {
-    //     // if let Some("csv") = field.name() {
-    //     //     let csv = field.bytes().await?;
-    //     //     let mut csv = csv::Reader::from_reader(csv.as_ref());
-    //     //     let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
-    //     //
-    //     //     time_entry::Entity::insert_many(data.collect::<Vec<_>>().unwrap())
-    //     //         .on_conflict(ReportEntry::grafting_conflict_statement())
-    //     //         .exec(&db)
-    //     //         .await
-    //     //         .unwrap()
-    //     // }
-    // }
-}
-
 #[tokio::main]
 async fn main() -> Result<()> {
     // install global collector configured based on RUST_LOG env var.
@@ -246,14 +77,14 @@ async fn main() -> Result<()> {
 
     // build our application with a route
     let app = Router::new()
-        .route("/import_csv", post(import_csv))
-        .route("/health", get(health))
-        .route("/current", get(current))
-        .route("/refresh", post(refresh))
-        .route("/report", post(report))
-        .route("/projects", get(projects))
-        .route("/clients", get(clients))
-        .route("/start_time_entry", post(start_time_entry))
+        .route("/import_csv", post(routes::import_csv))
+        .route("/health", get(routes::health))
+        .route("/current", get(routes::current))
+        .route("/refresh", post(routes::refresh))
+        .route("/report", post(routes::report))
+        .route("/projects", get(routes::projects))
+        .route("/clients", get(routes::clients))
+        .route("/start_time_entry", post(routes::start_time_entry))
         .layer(Extension(toggl_client))
         .layer(Extension(db))
         .layer(TraceLayer::new_for_http());
diff --git a/src/poll.rs b/src/poll.rs
index 1267ab1..8e778ae 100644
--- a/src/poll.rs
+++ b/src/poll.rs
@@ -3,7 +3,8 @@ use crate::entity::{client, project, time_entry};
 use crate::toggl_api::types::{Project, ProjectClient, TogglQuery};
 use sea_orm::{DatabaseConnection, EntityTrait, QuerySelect};
 use tracing::instrument;
-use crate::{day_exclusivity_condition, utils};
+use crate::utils;
+use crate::utils::day_exclusivity_condition;
 
 #[tracing::instrument(skip(client, db))]
 pub async fn poll_job(client: TogglApiClient, db: DatabaseConnection, poll_period: u64) {
@@ -76,7 +77,7 @@ pub async fn perform_poll(
             .await?;
     }
 
-    crate::cache_report(
+    crate::routes::cache_report(
         &db,
         &report,
         Some(
diff --git a/src/routes.rs b/src/routes.rs
new file mode 100644
index 0000000..30988c1
--- /dev/null
+++ b/src/routes.rs
@@ -0,0 +1,175 @@
+use tracing::{debug, instrument};
+use axum::{Extension, Json};
+use std::collections::HashMap;
+use serde_json::Value;
+use axum::response::IntoResponse;
+use axum::http::StatusCode;
+use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
+use anyhow::anyhow;
+use axum::extract::{Multipart, Query};
+use migration::{Condition, IntoCondition};
+use chrono::{NaiveDate, NaiveTime};
+use serde::Deserialize;
+use crate::entity::time_entry::{ActiveModel, Entity as TimeEntry};
+use crate::toggl_api::TogglApiClient;
+use crate::toggl_api::types::{Current, Project, ProjectClient, ReportEntry, TogglQuery};
+use crate::{entity, utils};
+use crate::entity::time_entry;
+
+#[instrument(skip(db, toggl_client))]
+pub async fn report(
+    Extension(toggl_client): Extension<TogglApiClient>,
+    Extension(db): Extension<DatabaseConnection>,
+    Json(query): Json<TogglQuery>,
+) -> utils::Result<Json<Vec<ReportEntry>>> {
+    let report = toggl_client.full_report(&query).await?;
+    debug!("Returned results: {:?}", report);
+
+    // We don't perform any deletes on report-fetched entries as they aren't necessarily exclusive
+    // on their time range.
+    cache_report(&db, &report, None).await?;
+
+    Ok(Json(report))
+}
+
+#[instrument(skip_all)]
+pub async fn cache_report(
+    db: &DatabaseConnection,
+    models: &Vec<ReportEntry>,
+    exclusive_on: Option<Condition>,
+) -> utils::Result<()> {
+    let models = models.iter().flat_map(|entry| entry.as_models());
+    let models = models.collect::<Vec<_>>();
+    let ids = models
+        .iter()
+        .map(|entry| entry.toggl_id.clone().unwrap())
+        .collect::<Vec<_>>();
+    debug!("Caching report entries: {:?}", models);
+
+    // TODO: Why is this needed?
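+    // Presumably because SeaORM's insert_many rejects an empty set of rows, so when there is
+    // nothing to cache we skip the insert (and any delete) entirely.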
+    if models.is_empty() {
+        return Ok(());
+    }
+
+    TimeEntry::insert_many(models)
+        .on_conflict(ReportEntry::grafting_conflict_statement())
+        .exec(db)
+        .await?;
+
+    if let Some(exclusive_on) = exclusive_on {
+        TimeEntry::delete_many()
+            .filter(
+                Condition::all()
+                    .add(exclusive_on)
+                    .add(time_entry::Column::TogglId.is_in(ids).not()),
+            )
+            .exec(db)
+            .await?;
+    }
+
+    Ok(())
+}
+
+#[instrument(skip(toggl_client))]
+pub async fn current(
+    Extension(toggl_client): Extension<TogglApiClient>,
+) -> utils::Result<Json<Option<Current>>> {
+    Ok(toggl_client.get_current().await.map(Json)?)
+}
+
+#[instrument(skip(toggl_client))]
+pub async fn start_time_entry(
+    Extension(toggl_client): Extension<TogglApiClient>,
+    Json(body): Json<HashMap<String, Value>>,
+) -> utils::Result<impl IntoResponse> {
+    toggl_client.start_time_entry(body).await?;
+
+    Ok((StatusCode::OK, "Ok"))
+}
+
+#[instrument(skip(db, toggl_client))]
+pub async fn projects(
+    Extension(db): Extension<DatabaseConnection>,
+    Extension(toggl_client): Extension<TogglApiClient>,
+) -> utils::Result<Json<Vec<Project>>> {
+    let projects = toggl_client.fetch_projects().await?;
+
+    entity::project::Entity::insert_many(projects.iter().map(Project::as_model))
+        .on_conflict(Project::grafting_conflict_statement())
+        .exec(&db)
+        .await?;
+
+    Ok(Json(projects))
+}
+
+#[instrument(skip(toggl_client, db))]
+pub async fn clients(
+    Extension(db): Extension<DatabaseConnection>,
+    Extension(toggl_client): Extension<TogglApiClient>,
+) -> utils::Result<Json<Vec<ProjectClient>>> {
+    let clients = toggl_client.fetch_clients().await?;
+    entity::client::Entity::insert_many(clients.iter().map(ProjectClient::as_model))
+        .on_conflict(ProjectClient::grafting_conflict_statement())
+        .exec(&db)
+        .await?;
+
+    Ok(Json(clients))
+}
+
+pub async fn health(Extension(toggl_client): Extension<TogglApiClient>) -> utils::Result<&'static str> {
+    return if toggl_client.check_health().await {
+        Ok("Ok")
+    } else {
+        Err(anyhow!("Panopto health check failed").into())
+    };
+}
+
+pub async fn import_csv(
+    Extension(db): Extension<DatabaseConnection>,
+    mut multipart: Multipart,
+) -> utils::Result<impl IntoResponse> {
+    return Ok((StatusCode::NOT_IMPLEMENTED, "Not implemented"));
+
+    // while let Some(field) = multipart.next_field().await? {
+    //     // if let Some("csv") = field.name() {
+    //     //     let csv = field.bytes().await?;
+    //     //     let mut csv = csv::Reader::from_reader(csv.as_ref());
+    //     //     let data = csv.records().filter_map(|f| f.ok()).map(from_csv_row);
+    //     //
+    //     //     time_entry::Entity::insert_many(data.collect::<Vec<_>>().unwrap())
+    //     //         .on_conflict(ReportEntry::grafting_conflict_statement())
+    //     //         .exec(&db)
+    //     //         .await
+    //     //         .unwrap()
+    //     // }
+    // }
+}
+
+#[derive(Debug, Clone, Deserialize)]
+struct RefreshQuery {
+    start_date: Option<String>,
+}
+
+#[instrument(skip(toggl_client, db))]
+pub async fn refresh(
+    Extension(toggl_client): Extension<TogglApiClient>,
+    Extension(db): Extension<DatabaseConnection>,
+    Query(RefreshQuery { start_date }): Query<RefreshQuery>,
+) -> utils::Result<&'static str> {
+    let end_date = chrono::Utc::now();
+    let end_date_query_string = end_date.date_naive().format("%Y-%m-%d").to_string();
+    let start_date_query_string = start_date.unwrap_or(end_date_query_string.clone());
+    let start_date = NaiveDate::parse_from_str(&start_date_query_string, "%Y-%m-%d")?;
+
+    let query = TogglQuery {
+        start_date: Some(start_date_query_string),
+        end_date: Some(end_date_query_string),
+        ..Default::default()
+    };
+
+    let report = toggl_client.full_report(&query).await?;
+    let exclusivity_condition = utils::day_exclusivity_condition(start_date, end_date.date_naive());
+    cache_report(&db, &report, Some(exclusivity_condition)).await?;
+
+    Ok("Ok")
+}
diff --git a/src/utils.rs b/src/utils.rs
index fa48032..1817516 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,6 +1,10 @@
 use axum::http::StatusCode;
 use axum::response::IntoResponse;
 use tokio::signal;
+use chrono::{NaiveDate, NaiveTime};
+use migration::{Condition, IntoCondition};
+use sea_orm::ColumnTrait;
+use crate::entity::time_entry;
 
 #[derive(Debug)]
 pub struct AppError(anyhow::Error);
@@ -52,3 +56,12 @@ pub async fn shutdown_signal() {
         _ = terminate => {},
     }
 }
+
+pub fn day_exclusivity_condition(start: NaiveDate, end: NaiveDate) -> Condition {
+    time_entry::Column::Start
+        .between(
+            start.and_time(NaiveTime::from_hms_opt(0, 0, 0).unwrap()),
+            end.and_time(NaiveTime::from_hms_opt(23, 59, 59).unwrap()),
+        )
+        .into_condition()
+}