Compare commits


2 Commits

Author SHA1 Message Date
8ce60ce278 rustfmt (all checks successful: Build and Publish Docker Container / build (push) in 6m33s) 2024-05-28 17:38:45 +01:00
fbf473b3b4 Split up the db methods and move to unprepared statements for the notify call 2024-05-28 17:38:25 +01:00
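The second commit's message points at the NOTIFY change visible in the diff below: the old code sent `NOTIFY monzo_new_transactions, $1` as a prepared statement with a bound payload, while the new code builds the complete statement string and sends it unprepared via `execute_unprepared`. PostgreSQL only accepts bind parameters on plannable statements (SELECT/INSERT/UPDATE/DELETE and the like), not on utility commands such as NOTIFY, which is presumably why the prepared form had to go. The following is a minimal sketch of the two SeaORM call shapes, not code from this repository; the `example_rows` table and the payload literal are hypothetical.

use sea_orm::{ConnectionTrait, DatabaseBackend, DatabaseConnection, DbErr, Statement};

// Sketch only: contrasts a prepared statement (SQL plus bound values) with an
// unprepared one (a single SQL string sent as-is). The table name and payload
// are illustrative and not taken from the diff below.
async fn notify_shapes(db: &DatabaseConnection) -> Result<(), DbErr> {
    // Prepared: Postgres plans the statement and binds $1 at execute time.
    db.execute(Statement::from_sql_and_values(
        DatabaseBackend::Postgres,
        "DELETE FROM example_rows WHERE id = $1",
        ["tx_00000001".into()],
    ))
    .await?;

    // Unprepared: the complete statement, payload already quoted, is sent raw.
    // NOTIFY cannot carry bind parameters, so only this shape works for it.
    db.execute_unprepared(r#"NOTIFY monzo_new_transactions, '["tx_00000001"]'"#)
        .await?;
    Ok(())
}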


@@ -1,26 +1,85 @@
+use crate::error::AppError;
 use anyhow::anyhow;
 use entity::{expenditure, transaction};
-use sea_orm::sea_query::OnConflict;
-use sea_orm::{ConnectionTrait, DatabaseBackend, QueryFilter, QueryTrait, Statement};
-use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, Iterable, TransactionTrait};
 use migration::PostgresQueryBuilder;
-use crate::error::AppError;
+use sea_orm::sea_query::OnConflict;
+use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, Iterable, TransactionTrait};
+use sea_orm::{
+    ConnectionTrait, DatabaseBackend, DatabaseTransaction, DbErr, QueryFilter, QueryTrait,
+    Statement,
+};
 
 pub struct Insertion {
     pub transaction: transaction::ActiveModel,
     pub contained_expenditures: Vec<expenditure::ActiveModel>,
 }
 
+async fn update_expenditures(
+    tx: &DatabaseTransaction,
+    insertions: &[Insertion],
+) -> Result<(), DbErr> {
+    // Expenditures can change as we re-categorise them, so we delete all the old ones and
+    // insert an entirely new set to ensure we don't end up leaving old ones around.
+    expenditure::Entity::delete_many()
+        .filter(
+            expenditure::Column::TransactionId
+                .is_in(insertions.iter().map(|i| i.transaction.id.as_ref())),
+        )
+        .exec(tx)
+        .await?;
+    expenditure::Entity::insert_many(
+        insertions
+            .iter()
+            .flat_map(|i| &i.contained_expenditures)
+            .cloned(),
+    )
+    .on_conflict(
+        OnConflict::columns(vec![
+            expenditure::Column::TransactionId,
+            expenditure::Column::Category,
+        ])
+        .update_columns(expenditure::Column::iter())
+        .to_owned(),
+    )
+    .exec(tx)
+    .await?;
+    Ok(())
+}
+
 // Note while this is more efficient in db calls, it does bind together the entire group.
 // We employ a batching process for now to try balance speed and failure rate, but it is worth
 // trying to move failures earlier and improve reporting.
-pub async fn insert(db: &DatabaseConnection, insertions: Vec<Insertion>) -> Result<Vec<String>, AppError> {
+pub async fn insert(
+    db: &DatabaseConnection,
+    insertions: Vec<Insertion>,
+) -> Result<Vec<String>, AppError> {
     let mut new_transaction_ids = Vec::new();
     for insertions in insertions.chunks(400) {
         let tx = db.begin().await?;
-        let insert = transaction::Entity::insert_many(insertions.iter().map(|i| &i.transaction).cloned())
+        let inserted_transaction_ids = update_transactions(insertions, &tx).await?;
+        update_expenditures(&tx, &insertions).await?;
+        tx.commit().await?;
+        // We wait until the transaction is committed before adding the new transaction ids to the
+        // list to avoid issues with the transaction being rolled back.
+        new_transaction_ids.extend(inserted_transaction_ids);
+    }
+    // Notify the new transactions once everything is committed.
+    notify_new_transactions(db, &new_transaction_ids).await?;
+    Ok(new_transaction_ids)
+}
+
+async fn update_transactions(
+    insertions: &[Insertion],
+    tx: &DatabaseTransaction,
+) -> Result<Vec<String>, AppError> {
+    let insert =
+        transaction::Entity::insert_many(insertions.iter().map(|i| &i.transaction).cloned())
             .on_conflict(
                 OnConflict::column(transaction::Column::Id)
                     .update_columns(transaction::Column::iter())
@@ -30,56 +89,27 @@ pub async fn insert(db: &DatabaseConnection, insertions: Vec<Insertion>) -> Resu
             .returning_col(transaction::Column::Id)
             .build(PostgresQueryBuilder);
-        let inserted_transaction_ids = tx.query_all(Statement::from_sql_and_values(
+    let inserted_transaction_ids = tx
+        .query_all(Statement::from_sql_and_values(
             DatabaseBackend::Postgres,
             insert.0,
             insert.1,
-        )).await?
-            .iter()
-            .map(|r| r.try_get_by("id"))
-            .collect::<Result<Vec<String>, _>>()?;
-        // Expenditures can change as we re-categorise them, so we delete all the old ones and
-        // insert an entirely new set to ensure we don't end up leaving old ones around.
-        expenditure::Entity::delete_many()
-            .filter(
-                expenditure::Column::TransactionId
-                    .is_in(insertions.iter().map(|i| i.transaction.id.as_ref())),
-            )
-            .exec(&tx)
-            .await?;
-        expenditure::Entity::insert_many(
-            insertions
-                .iter()
-                .flat_map(|i| &i.contained_expenditures)
-                .cloned(),
-        )
-        .on_conflict(
-            OnConflict::columns(vec![
-                expenditure::Column::TransactionId,
-                expenditure::Column::Category,
-            ])
-            .update_columns(expenditure::Column::iter())
-            .to_owned(),
-        )
-        .exec(&tx)
+        ))
+        .await?
+        .iter()
+        .map(|r| r.try_get_by("id"))
+        .collect::<Result<Vec<String>, _>>()?;
+    Ok(inserted_transaction_ids)
+}
+
+async fn notify_new_transactions(
+    db: &DatabaseConnection,
+    new_transaction_ids: &[String],
+) -> Result<(), AppError> {
+    let payload = serde_json::to_string(&new_transaction_ids).map_err(|e| anyhow!(e))?;
+    db.execute_unprepared(&format!(r#"NOTIFY monzo_new_transactions, {payload}"#,))
         .await?;
-        tx.commit().await?;
-        new_transaction_ids.extend(inserted_transaction_ids);
-    }
-    let payload = serde_json::to_string(&new_transaction_ids)
-        .map_err(|e| anyhow!(e))?;
-    db.execute(
-        Statement::from_sql_and_values(
-            DatabaseBackend::Postgres,
-            "NOTIFY monzo_new_transactions, $1",
-            vec![sea_orm::Value::from(payload)],
-        )
-    ).await?;
-    Ok(new_transaction_ids)
+    Ok(())
 }
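After the split, `insert` remains the module's only public entry point: it upserts transactions and their expenditures in chunks of 400, commits each chunk in its own database transaction, and sends a single NOTIFY once everything is committed. A hypothetical caller, written as if it lived in the same crate, might look like the sketch below; the ActiveModel field names and types are assumptions inferred from the columns the diff references (Id, TransactionId, Category) and may not match the real entity definitions.

use crate::error::AppError;
use entity::{expenditure, transaction};
use sea_orm::{DatabaseConnection, Set};

// Hypothetical usage sketch; field names, types and values are illustrative.
async fn example(db: &DatabaseConnection) -> Result<(), AppError> {
    let insertion = Insertion {
        transaction: transaction::ActiveModel {
            id: Set("tx_00000001".to_owned()),
            ..Default::default()
        },
        contained_expenditures: vec![expenditure::ActiveModel {
            transaction_id: Set("tx_00000001".to_owned()),
            category: Set("groceries".to_owned()),
            ..Default::default()
        }],
    };
    // Upserts the transaction and its expenditures, then emits one
    // NOTIFY covering all newly committed ids.
    let new_ids = insert(db, vec![insertion]).await?;
    println!("{} transactions written", new_ids.len());
    Ok(())
}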