Run rustfmt and move cache_report into sync_service.rs

Joshua Coles 2024-03-03 19:53:20 +00:00
parent 73b3e2cb96
commit 889859dbae
13 changed files with 290 additions and 220 deletions

View File

@@ -19,11 +19,23 @@ impl MigrationTrait for Migration {
.primary_key(),
)
.col(
ColumnDef::new(TimeEntry::TogglId).big_unsigned().not_null().unique_key())
ColumnDef::new(TimeEntry::TogglId)
.big_unsigned()
.not_null()
.unique_key(),
)
.col(ColumnDef::new(TimeEntry::Description).string().not_null())
.col(ColumnDef::new(TimeEntry::ProjectId).big_unsigned())
.col(ColumnDef::new(TimeEntry::Start).timestamp_with_time_zone().not_null())
.col(ColumnDef::new(TimeEntry::Stop).timestamp_with_time_zone().not_null())
.col(
ColumnDef::new(TimeEntry::Start)
.timestamp_with_time_zone()
.not_null(),
)
.col(
ColumnDef::new(TimeEntry::Stop)
.timestamp_with_time_zone()
.not_null(),
)
.col(ColumnDef::new(TimeEntry::RawJson).json_binary().not_null())
.to_owned(),
)
@@ -46,5 +58,5 @@ enum TimeEntry {
ProjectId,
Start,
Stop,
RawJson
RawJson,
}

View File

@@ -20,7 +20,11 @@ impl MigrationTrait for Migration {
.col(ColumnDef::new(Client::Name).string().not_null())
.col(ColumnDef::new(Client::Archived).boolean().not_null())
.col(ColumnDef::new(Client::WorkspaceId).integer().not_null())
.col(ColumnDef::new(Client::At).timestamp_with_time_zone().not_null())
.col(
ColumnDef::new(Client::At)
.timestamp_with_time_zone()
.not_null(),
)
.col(ColumnDef::new(Client::ServerDeletedAt).timestamp_with_time_zone())
.to_owned(),
)
@@ -42,5 +46,5 @@ enum Client {
Archived,
WorkspaceId,
At,
ServerDeletedAt
ServerDeletedAt,
}

View File

@@ -11,8 +11,19 @@ impl MigrationTrait for Migration {
Table::create()
.table(Project::Table)
.if_not_exists()
.col(ColumnDef::new(Project::Id).integer().primary_key().auto_increment().not_null())
.col(ColumnDef::new(Project::TogglId).big_unsigned().not_null().unique_key())
.col(
ColumnDef::new(Project::Id)
.integer()
.primary_key()
.auto_increment()
.not_null(),
)
.col(
ColumnDef::new(Project::TogglId)
.big_unsigned()
.not_null()
.unique_key(),
)
.col(
ColumnDef::new(Project::WorkspaceId)
.big_unsigned()
@@ -27,13 +38,15 @@ impl MigrationTrait for Migration {
.await?;
// Create foreign key
manager.create_foreign_key(
ForeignKey::create()
.name("project_client_id")
.from(Project::Table, Project::ClientId)
.to(Client::Table, Client::Id)
.to_owned(),
).await?;
manager
.create_foreign_key(
ForeignKey::create()
.name("project_client_id")
.from(Project::Table, Project::ClientId)
.to(Client::Table, Client::Id)
.to_owned(),
)
.await?;
Ok(())
}

View File

@@ -6,46 +6,55 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.add_column(ColumnDef::new(Project::Color).text())
.add_column(ColumnDef::new(Project::ServerCreatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerUpdatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerDeletedAt).timestamp_with_time_zone())
.to_owned()
).await?;
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.add_column(ColumnDef::new(Project::Color).text())
.add_column(ColumnDef::new(Project::ServerCreatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerUpdatedAt).timestamp_with_time_zone())
.add_column(ColumnDef::new(Project::ServerDeletedAt).timestamp_with_time_zone())
.to_owned(),
)
.await?;
manager.get_connection().execute_unprepared(
r#"
manager
.get_connection()
.execute_unprepared(
r#"
update "project"
set "color" = raw_json ->> 'color',
"server_created_at" = (raw_json ->> 'created_at') :: timestamptz,
"server_updated_at" = (raw_json ->> 'at') :: timestamptz,
"server_deleted_at" = (raw_json ->> 'server_deleted_at') :: timestamptz
"#,
).await?;
)
.await?;
manager.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.modify_column(ColumnDef::new(Project::Color).not_null())
.modify_column(ColumnDef::new(Project::ServerCreatedAt).not_null())
.modify_column(ColumnDef::new(Project::ServerUpdatedAt).not_null())
.to_owned()
).await
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.modify_column(ColumnDef::new(Project::Color).not_null())
.modify_column(ColumnDef::new(Project::ServerCreatedAt).not_null())
.modify_column(ColumnDef::new(Project::ServerUpdatedAt).not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.drop_column(Project::Color)
.drop_column(Project::ServerCreatedAt)
.drop_column(Project::ServerUpdatedAt)
.drop_column(Project::ServerDeletedAt)
.to_owned()
).await
manager
.alter_table(
TableAlterStatement::new()
.table(Project::Table)
.drop_column(Project::Color)
.drop_column(Project::ServerCreatedAt)
.drop_column(Project::ServerUpdatedAt)
.drop_column(Project::ServerDeletedAt)
.to_owned(),
)
.await
}
}

View File

@@ -6,44 +6,58 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.alter_table(TableAlterStatement::new()
.table(TimeEntry::Table)
.add_column(ColumnDef::new(TimeEntry::Tags)
.json_binary()
.default(serde_json::json!([]))
.not_null()
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.add_column(
ColumnDef::new(TimeEntry::Tags)
.json_binary()
.default(serde_json::json!([]))
.not_null(),
)
.add_column(
ColumnDef::new(TimeEntry::ServerUpdatedAt).timestamp_with_time_zone(),
)
.add_column(
ColumnDef::new(TimeEntry::ServerDeletedAt).timestamp_with_time_zone(),
)
.to_owned(),
)
.add_column(ColumnDef::new(TimeEntry::ServerUpdatedAt)
.timestamp_with_time_zone())
.add_column(ColumnDef::new(TimeEntry::ServerDeletedAt)
.timestamp_with_time_zone())
.to_owned()
).await?;
.await?;
manager.get_connection().execute_unprepared(
r#"
manager
.get_connection()
.execute_unprepared(
r#"
update "time_entry"
set "tags" = coalesce(raw_json -> 'tags', '[]' :: jsonb),
"server_updated_at" = (raw_json ->> 'at') :: timestamptz;
"#,
).await?;
)
.await?;
manager.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.modify_column(ColumnDef::new(TimeEntry::ServerUpdatedAt).not_null())
.to_owned()
).await
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.modify_column(ColumnDef::new(TimeEntry::ServerUpdatedAt).not_null())
.to_owned(),
)
.await
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager.alter_table(TableAlterStatement::new()
.table(TimeEntry::Table)
.drop_column(TimeEntry::Tags)
.drop_column(TimeEntry::ServerDeletedAt)
.drop_column(TimeEntry::ServerUpdatedAt)
.to_owned()
).await
manager
.alter_table(
TableAlterStatement::new()
.table(TimeEntry::Table)
.drop_column(TimeEntry::Tags)
.drop_column(TimeEntry::ServerDeletedAt)
.drop_column(TimeEntry::ServerUpdatedAt)
.to_owned(),
)
.await
}
}

View File

@@ -1,7 +1,7 @@
use crate::utils::Result;
use anyhow::anyhow;
use chrono::{NaiveDate, NaiveTime};
use csv::StringRecord;
use crate::utils::Result;
mod headings {
pub const USER: usize = 1;
@@ -20,10 +20,18 @@ mod headings {
}
fn parse_csv_row(row: StringRecord) -> Result<crate::entity::time_entry::Model> {
let start_date = row.get(headings::START_DATE).ok_or(anyhow!("Missing start date in CSV"))?;
let start_time = row.get(headings::START_TIME).ok_or(anyhow!("Missing start time in CSV"))?;
let end_date = row.get(headings::END_DATE).ok_or(anyhow!("Missing end date in CSV"))?;
let end_time = row.get(headings::END_TIME).ok_or(anyhow!("Missing end time in CSV"))?;
let start_date = row
.get(headings::START_DATE)
.ok_or(anyhow!("Missing start date in CSV"))?;
let start_time = row
.get(headings::START_TIME)
.ok_or(anyhow!("Missing start time in CSV"))?;
let end_date = row
.get(headings::END_DATE)
.ok_or(anyhow!("Missing end date in CSV"))?;
let end_time = row
.get(headings::END_TIME)
.ok_or(anyhow!("Missing end time in CSV"))?;
let start_time = NaiveTime::parse_from_str(start_time, "%H:%M:%S")?;
let end_time = NaiveTime::parse_from_str(end_time, "%H:%M:%S")?;
@@ -33,15 +41,28 @@ fn parse_csv_row(row: StringRecord) -> Result<crate::entity::time_entry::Model>
let start = start_date.and_time(start_time);
let end = end_date.and_time(end_time);
let description = row.get(headings::DESCRIPTION).ok_or(anyhow!("Missing description in CSV"))?;
let project_name = row.get(headings::PROJECT_NAME).ok_or(anyhow!("Missing project name in CSV"))?;
let client_name = row.get(headings::CLIENT_NAME).ok_or(anyhow!("Missing client name in CSV"))?;
let tags = row.get(headings::TAGS).ok_or(anyhow!("Missing tags in CSV"))?;
let task_name = row.get(headings::TASK_NAME).ok_or(anyhow!("Missing task name in CSV"))?;
let billable = match row.get(headings::BILLABLE).ok_or(anyhow!("Missing billable in CSV"))? {
let description = row
.get(headings::DESCRIPTION)
.ok_or(anyhow!("Missing description in CSV"))?;
let project_name = row
.get(headings::PROJECT_NAME)
.ok_or(anyhow!("Missing project name in CSV"))?;
let client_name = row
.get(headings::CLIENT_NAME)
.ok_or(anyhow!("Missing client name in CSV"))?;
let tags = row
.get(headings::TAGS)
.ok_or(anyhow!("Missing tags in CSV"))?;
let task_name = row
.get(headings::TASK_NAME)
.ok_or(anyhow!("Missing task name in CSV"))?;
let billable = match row
.get(headings::BILLABLE)
.ok_or(anyhow!("Missing billable in CSV"))?
{
"Yes" => true,
"No" => false,
_ => unimplemented!("Unknown billable value")
_ => unimplemented!("Unknown billable value"),
};
unimplemented!("Refactor model to support non-json sources")

View File

@@ -1,5 +1,5 @@
use crate::entity::{client, project, time_entry};
use crate::toggl_api::types::{Project, Client, ReportRow, TimeEntry};
use crate::toggl_api::types::{Client, Project, ReportRow, TimeEntry};
use sea_orm::sea_query::OnConflict;
use sea_orm::{NotSet, Set};
@@ -18,7 +18,7 @@ impl ReportRow {
server_deleted_at: NotSet,
server_updated_at: Set(inner.at.fixed_offset()),
// TODO: tags on report row import, need to track in separate table
tags: NotSet
tags: NotSet,
})
.collect()
}

View File

@@ -1,19 +1,20 @@
use crate::toggl_api::TogglApiClient;
use axum::routing::{get, post};
use axum::{Extension, Router};
use utils::{Result, shutdown_signal};
use clap::Parser;
use migration::{Migrator, MigratorTrait};
use std::net::SocketAddr;
use tower_http::trace::TraceLayer;
use utils::{shutdown_signal, Result};
mod csv_parser;
mod db;
mod entity;
mod poll;
mod utils;
mod csv_parser;
mod toggl_api;
mod routes;
mod sync_service;
mod toggl_api;
mod utils;
#[derive(Debug, Clone, Parser)]
struct Config {
@@ -40,18 +41,14 @@ async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let config = Config::parse();
let toggl_client = TogglApiClient::new(
&config.workspace_id.to_string(),
&config.toggl_api_token,
);
let toggl_client =
TogglApiClient::new(&config.workspace_id.to_string(), &config.toggl_api_token);
let db = sea_orm::Database::connect(config.database_url)
.await
.unwrap();
Migrator::up(&db, None)
.await
.expect("Failed to migrate");
Migrator::up(&db, None).await.expect("Failed to migrate");
tokio::spawn(poll::poll_job(
toggl_client.clone(),

View File

@@ -1,20 +1,20 @@
use tracing::{debug, instrument};
use axum::{Extension, Json};
use std::collections::HashMap;
use serde_json::Value;
use axum::response::IntoResponse;
use axum::http::StatusCode;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use crate::entity::time_entry;
use crate::entity::time_entry::Entity as TimeEntry;
use crate::toggl_api::types::{self, Client, Project, ReportRow, TogglReportQuery};
use crate::toggl_api::TogglApiClient;
use crate::{entity, sync_service, utils};
use anyhow::anyhow;
use axum::extract::{Multipart, Query};
use migration::{Condition};
use chrono::{NaiveDate};
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::{Extension, Json};
use chrono::NaiveDate;
use migration::Condition;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use serde::Deserialize;
use crate::entity::time_entry::{Entity as TimeEntry};
use crate::toggl_api::TogglApiClient;
use crate::toggl_api::types::{self, Project, Client, ReportRow, TogglReportQuery};
use crate::{entity, utils};
use crate::entity::time_entry;
use serde_json::Value;
use std::collections::HashMap;
use tracing::{debug, instrument};
#[instrument(skip(db, toggl_client))]
pub async fn report(
@@ -27,49 +27,11 @@ pub async fn report(
// We don't perform any deletes on report-fetched entries as they aren't necessarily exclusive
// on their time range.
cache_report(&db, &report, None).await?;
sync_service::cache_report(&db, &report, None).await?;
Ok(Json(report))
}
#[instrument(skip_all)]
pub async fn cache_report(
db: &DatabaseConnection,
models: &Vec<ReportRow>,
exclusive_on: Option<Condition>,
) -> utils::Result<()> {
let models = models.iter().flat_map(|entry| entry.as_models());
let models = models.collect::<Vec<_>>();
let ids = models
.iter()
.map(|entry| entry.toggl_id.clone().unwrap())
.collect::<Vec<_>>();
debug!("Caching report entries: {:?}", models);
// TODO: Why is this needed?
if models.is_empty() {
return Ok(());
}
TimeEntry::insert_many(models)
.on_conflict(ReportRow::grafting_conflict_statement())
.exec(db)
.await?;
if let Some(exclusive_on) = exclusive_on {
TimeEntry::delete_many()
.filter(
Condition::all()
.add(exclusive_on)
.add(time_entry::Column::TogglId.is_in(ids).not()),
)
.exec(db)
.await?;
}
Ok(())
}
#[instrument(skip(toggl_client))]
pub async fn current(
Extension(toggl_client): Extension<TogglApiClient>,
@@ -116,7 +78,9 @@ pub async fn clients(
Ok(Json(clients))
}
pub async fn health(Extension(toggl_client): Extension<TogglApiClient>) -> utils::Result<&'static str> {
pub async fn health(
Extension(toggl_client): Extension<TogglApiClient>,
) -> utils::Result<&'static str> {
return if toggl_client.check_health().await {
Ok("Ok")
} else {
@@ -169,7 +133,7 @@ pub async fn refresh(
let report = toggl_client.full_report(&query).await?;
let exclusivity_condition = utils::day_exclusivity_condition(start_date, end_date.date_naive());
cache_report(&db, &report, Some(exclusivity_condition)).await?;
sync_service::cache_report(&db, &report, Some(exclusivity_condition)).await?;
Ok("Ok")
}

src/sync_service.rs (new file, 45 additions)
View File

@@ -0,0 +1,45 @@
use crate::entity::time_entry;
use crate::entity::time_entry::Entity as TimeEntry;
use crate::toggl_api::types::ReportRow;
use crate::utils;
use migration::Condition;
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
use tracing::{debug, instrument};
#[instrument(skip_all)]
pub async fn cache_report(
db: &DatabaseConnection,
models: &Vec<ReportRow>,
exclusive_on: Option<Condition>,
) -> utils::Result<()> {
let models = models.iter().flat_map(|entry| entry.as_models());
let models = models.collect::<Vec<_>>();
let ids = models
.iter()
.map(|entry| entry.toggl_id.clone().unwrap())
.collect::<Vec<_>>();
debug!("Caching report entries: {:?}", models);
// TODO: Why is this needed?
if models.is_empty() {
return Ok(());
}
TimeEntry::insert_many(models)
.on_conflict(ReportRow::grafting_conflict_statement())
.exec(db)
.await?;
if let Some(exclusive_on) = exclusive_on {
TimeEntry::delete_many()
.filter(
Condition::all()
.add(exclusive_on)
.add(time_entry::Column::TogglId.is_in(ids).not()),
)
.exec(db)
.await?;
}
Ok(())
}
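
For reference, both call sites in the routes diff above now go through the relocated helper; a minimal sketch of the two usage patterns (all names are taken from this commit's diffs):

// Report-endpoint fetches aren't exclusive on their time range, so no deletes:
sync_service::cache_report(&db, &report, None).await?;

// The refresh path deletes stale cached entries within the fetched window:
let exclusivity_condition = utils::day_exclusivity_condition(start_date, end_date.date_naive());
sync_service::cache_report(&db, &report, Some(exclusivity_condition)).await?;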

View File

@@ -1,17 +1,19 @@
use crate::toggl_api::types::{
Client as ProjectClient, Project, ReportRow, TimeEntry, TogglReportQuery,
};
use anyhow::anyhow;
use axum::http::StatusCode;
use base64::engine::general_purpose::STANDARD;
use base64::Engine;
use chrono::{DateTime, Utc};
use hyper::HeaderMap;
use reqwest::header::HeaderValue;
use reqwest::{Client, RequestBuilder, Response};
use serde_json::Value;
use std::collections::HashMap;
use std::time::Duration;
use anyhow::anyhow;
use axum::http::StatusCode;
use base64::Engine;
use base64::engine::general_purpose::STANDARD;
use chrono::{DateTime, Utc};
use hyper::HeaderMap;
use reqwest::header::HeaderValue;
use tracing::instrument;
use tracing::log::debug;
use crate::toggl_api::types::{TimeEntry, Project, Client as ProjectClient, ReportRow, TogglReportQuery};
#[derive(Debug, Clone)]
pub struct TogglApiClient {
@@ -24,7 +26,9 @@ pub struct TogglApiClient {
impl TogglApiClient {
async fn make_request(&self, request_builder: RequestBuilder) -> crate::Result<Response> {
loop {
let builder = request_builder.try_clone().ok_or(anyhow!("Failed to clone request builder"))?;
let builder = request_builder
.try_clone()
.ok_or(anyhow!("Failed to clone request builder"))?;
let response = self.client.execute(builder.build()?).await?;
// If we are rate limited, wait a bit and try again
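
// Sketch, not part of this diff: the hunk above cuts off before the retry
// branch. Assuming Toggl signals rate limiting with HTTP 429 and that a short
// fixed pause is acceptable, the behaviour the comment describes would look
// roughly like this inside the loop:
if response.status() == StatusCode::TOO_MANY_REQUESTS {
    tokio::time::sleep(Duration::from_secs(1)).await;
    continue;
}
return Ok(response);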
@@ -60,10 +64,7 @@ impl TogglApiClient {
let mut headers = HeaderMap::new();
let mut value = HeaderValue::from_str(&format!("Basic {}", toggl_auth)).unwrap();
value.set_sensitive(true);
headers.insert(
"Authorization",
value,
);
headers.insert("Authorization", value);
headers
}
@@ -74,9 +75,8 @@ impl TogglApiClient {
base_url = self.base_url,
);
let projects = self.make_request(self
.client
.get(&url))
let projects = self
.make_request(self.client.get(&url))
.await?
.json::<Vec<Project>>()
.await?;
@@ -91,9 +91,8 @@ impl TogglApiClient {
base_url = self.base_url,
);
let clients = self.make_request(self
.client
.get(&url))
let clients = self
.make_request(self.client.get(&url))
.await?
.json::<Vec<ProjectClient>>()
.await?;
@@ -101,37 +100,37 @@ impl TogglApiClient {
Ok(clients)
}
pub async fn fetch_time_entries_modified_since(&self, date_time: DateTime<Utc>) -> crate::Result<Vec<TimeEntry>> {
let url = format!(
"{base_url}/me/time_entries",
base_url = self.base_url
);
pub async fn fetch_time_entries_modified_since(
&self,
date_time: DateTime<Utc>,
) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(
self.make_request(self.client.get(url)
.query(&[("since", date_time.timestamp())]))
.await?
.json::<Vec<TimeEntry>>()
.await?
)
Ok(self
.make_request(
self.client
.get(url)
.query(&[("since", date_time.timestamp())]),
)
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_time_entries_in_range(&self, (start, end): (DateTime<Utc>, DateTime<Utc>)) -> crate::Result<Vec<TimeEntry>> {
let url = format!(
"{base_url}/me/time_entries",
base_url = self.base_url
);
pub async fn fetch_time_entries_in_range(
&self,
(start, end): (DateTime<Utc>, DateTime<Utc>),
) -> crate::Result<Vec<TimeEntry>> {
let url = format!("{base_url}/me/time_entries", base_url = self.base_url);
Ok(
self.make_request(self.client.get(url)
.query(&[
("start_date", start.to_rfc3339()),
("end_date", end.to_rfc3339())
]))
.await?
.json::<Vec<TimeEntry>>()
.await?
)
Ok(self
.make_request(self.client.get(url).query(&[
("start_date", start.to_rfc3339()),
("end_date", end.to_rfc3339()),
]))
.await?
.json::<Vec<TimeEntry>>()
.await?)
}
pub async fn fetch_current_time_entry(&self) -> crate::Result<Option<TimeEntry>> {
@@ -140,9 +139,8 @@ impl TogglApiClient {
base_url = self.base_url
);
let res = self.make_request(self
.client
.get(url))
let res = self
.make_request(self.client.get(url))
.await?
.json::<Option<TimeEntry>>()
.await?;
@@ -162,9 +160,7 @@ impl TogglApiClient {
self.workspace_id.parse::<i32>()?.into(),
);
self.make_request(self.client
.post(url)
.json(&body)).await?;
self.make_request(self.client.post(url).json(&body)).await?;
Ok(())
}
@@ -180,10 +176,7 @@ impl TogglApiClient {
}
#[instrument(skip(self, filters))]
pub async fn full_report(
&self,
filters: &TogglReportQuery,
) -> crate::Result<Vec<ReportRow>> {
pub async fn full_report(&self, filters: &TogglReportQuery) -> crate::Result<Vec<ReportRow>> {
let url = format!(
"{base_url}/workspace/{workspace_id}/search/time_entries",
base_url = self.reports_base_url,
@@ -201,15 +194,15 @@ impl TogglApiClient {
}
// TODO: Implement rate limiting
let response = self.make_request(self
.client
.post(&url)
.json(&Self::paginate_filters(&filters, last_row_number_n)))
let response = self
.make_request(
self.client
.post(&url)
.json(&Self::paginate_filters(&filters, last_row_number_n)),
)
.await?;
let data = response
.json::<Vec<ReportRow>>()
.await?;
let data = response.json::<Vec<ReportRow>>().await?;
last_row_number = data.last().map(|e| e.row_number as u64);
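
The pagination loop around this hunk is only partially visible; a hedged sketch of the cursor pattern it implements (helper and field names come from the diff, while the loop framing and termination check are assumptions):

// Page through the report, cursoring on the last row number seen so far.
let mut rows: Vec<ReportRow> = Vec::new();
let mut last_row_number: Option<u64> = None;
loop {
    let response = self
        .make_request(
            self.client
                .post(&url)
                .json(&Self::paginate_filters(&filters, last_row_number)),
        )
        .await?;
    let data = response.json::<Vec<ReportRow>>().await?;
    last_row_number = data.last().map(|e| e.row_number as u64);
    if data.is_empty() {
        break; // an empty page means the report is exhausted
    }
    rows.extend(data);
}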

View File

@@ -1,9 +1,9 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use serde_with::skip_serializing_none;
use std::collections::HashMap;
use std::option::Option;
use chrono::{DateTime, Utc};
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct ReportRow {
@@ -46,7 +46,6 @@ pub struct TimeEntry {
pub at: DateTime<Utc>,
pub server_deleted_at: Option<DateTime<Utc>>,
pub user_id: i64,
// Ignored fields
// duronly: bool,
// uid: i64,
@@ -65,7 +64,6 @@ pub struct Project {
pub at: DateTime<Utc>,
pub server_deleted_at: Option<DateTime<Utc>>,
pub created_at: DateTime<Utc>,
// cid: Option<serde_json::Value>,
// wid: i64,
// rate: Option<serde_json::Value>,

View File

@@ -1,10 +1,10 @@
use crate::entity::time_entry;
use axum::http::StatusCode;
use axum::response::IntoResponse;
use tokio::signal;
use chrono::{NaiveDate, NaiveTime};
use migration::{Condition, IntoCondition};
use sea_orm::ColumnTrait;
use crate::entity::time_entry;
use tokio::signal;
#[derive(Debug)]
pub struct AppError(anyhow::Error);