From 2eb4b480b9cfc6e184041c83ea67f646b3622e35 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Sat, 26 Aug 2023 22:35:12 +0200 Subject: [PATCH 01/90] possible impl with sqlx, we need to refactor dependencies and features and also probably need to make queries folder for `sqlite` and `mysql` --- fang/Cargo.toml | 2 + .../asynk/simple_async_worker/src/main.rs | 5 +- .../simple_cron_async_worker/src/main.rs | 5 +- .../up.sql | 6 +- fang/src/asynk/async_queue.rs | 390 ++++++++---------- .../asynk/async_queue/async_queue_tests.rs | 20 +- fang/src/asynk/async_runnable.rs | 18 +- fang/src/asynk/async_worker.rs | 47 ++- fang/src/asynk/queries/fail_task.sql | 2 +- fang/src/asynk/queries/fetch_task_type.sql | 2 +- fang/src/asynk/queries/find_task_by_id.sql | 2 +- .../asynk/queries/find_task_by_uniq_hash.sql | 2 +- fang/src/asynk/queries/insert_task.sql | 2 +- fang/src/asynk/queries/insert_task_uniq.sql | 2 +- .../queries/remove_all_scheduled_tasks.sql | 2 +- fang/src/asynk/queries/retry_task.sql | 2 +- fang/src/asynk/queries/update_task_state.sql | 2 +- fang/src/blocking/postgres_schema.rs | 7 +- fang/src/blocking/queue.rs | 25 +- fang/src/blocking/queue/queue_tests.rs | 34 +- fang/src/blocking/worker.rs | 16 +- fang/src/lib.rs | 109 ++++- 22 files changed, 380 insertions(+), 322 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 876adcdd..fc3a37cd 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -46,6 +46,8 @@ typed-builder = "0.14" typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , optional = true} +# sqlx with no TLS, if you want TLS you must to get feature "tls-native-tls" or "tls-rustls" +sqlx = {version = "0.7", features = ["any" , "macros" , "json" , "uuid" , "chrono" , "runtime-tokio", "postgres", "sqlite", "mysql"] } [dependencies.diesel] version = "2.1" diff --git a/fang/fang_examples/asynk/simple_async_worker/src/main.rs 
b/fang/fang_examples/asynk/simple_async_worker/src/main.rs index 5a148a5d..fdc7eef1 100644 --- a/fang/fang_examples/asynk/simple_async_worker/src/main.rs +++ b/fang/fang_examples/asynk/simple_async_worker/src/main.rs @@ -7,7 +7,6 @@ use fang::asynk::async_queue::AsyncQueueable; use fang::asynk::async_worker_pool::AsyncWorkerPool; use fang::run_migrations_postgres; use fang::AsyncRunnable; -use fang::NoTls; use simple_async_worker::MyFailingTask; use simple_async_worker::MyTask; use std::env; @@ -36,10 +35,10 @@ async fn main() { .max_pool_size(max_pool_size) .build(); - queue.connect(NoTls).await.unwrap(); + queue.connect().await.unwrap(); log::info!("Queue connected..."); - let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() + let mut pool: AsyncWorkerPool = AsyncWorkerPool::builder() .number_of_workers(10_u32) .queue(queue.clone()) .build(); diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs index e7b7929f..34709be3 100644 --- a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs +++ b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs @@ -3,7 +3,6 @@ use fang::asynk::async_queue::AsyncQueue; use fang::asynk::async_queue::AsyncQueueable; use fang::asynk::async_worker_pool::AsyncWorkerPool; use fang::AsyncRunnable; -use fang::NoTls; use simple_cron_async_worker::MyCronTask; use std::env; use std::time::Duration; @@ -21,10 +20,10 @@ async fn main() { .max_pool_size(max_pool_size) .build(); - queue.connect(NoTls).await.unwrap(); + queue.connect().await.unwrap(); log::info!("Queue connected..."); - let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() + let mut pool: AsyncWorkerPool = AsyncWorkerPool::builder() .number_of_workers(10_u32) .queue(queue.clone()) .build(); diff --git a/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql b/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql index 
cd4b3544..572b2bc4 100644 --- a/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql +++ b/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql @@ -3,12 +3,12 @@ CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; CREATE TYPE fang_task_state AS ENUM ('new', 'in_progress', 'failed', 'finished', 'retried'); CREATE TABLE fang_tasks ( - id uuid PRIMARY KEY DEFAULT uuid_generate_v4(), + id BYTEA PRIMARY KEY, metadata jsonb NOT NULL, error_message TEXT, state fang_task_state DEFAULT 'new' NOT NULL, - task_type VARCHAR DEFAULT 'common' NOT NULL, - uniq_hash CHAR(64), + task_type TEXT DEFAULT 'common' NOT NULL, + uniq_hash TEXT, -- just for compatibility with sqlx is text retries INTEGER DEFAULT 0 NOT NULL, scheduled_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 67117ec5..d3ea180f 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -7,26 +7,22 @@ use crate::FangTaskState; use crate::Scheduled::*; use crate::Task; use async_trait::async_trait; -use bb8_postgres::bb8::Pool; -use bb8_postgres::bb8::RunError; -use bb8_postgres::tokio_postgres::row::Row; -use bb8_postgres::tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; -use bb8_postgres::tokio_postgres::Socket; -use bb8_postgres::tokio_postgres::Transaction; -use bb8_postgres::PostgresConnectionManager; + use chrono::DateTime; use chrono::Duration; use chrono::Utc; use cron::Schedule; -use postgres_types::ToSql; use sha2::{Digest, Sha256}; +use sqlx::any::install_default_drivers; +use sqlx::pool::PoolOptions; +use sqlx::types::Uuid; +use sqlx::Acquire; +use sqlx::Any; +use sqlx::AnyPool; +use sqlx::Transaction; use std::str::FromStr; use thiserror::Error; use typed_builder::TypedBuilder; -use uuid::Uuid; - -#[cfg(test)] -use bb8_postgres::tokio_postgres::tls::NoTls; #[cfg(test)] use 
self::async_queue_tests::test_asynk_queue; @@ -51,9 +47,7 @@ pub const DEFAULT_TASK_TYPE: &str = "common"; #[derive(Debug, Error)] pub enum AsyncQueueError { #[error(transparent)] - PoolError(#[from] RunError), - #[error(transparent)] - PgError(#[from] bb8_postgres::tokio_postgres::Error), + SqlXError(#[from] sqlx::Error), #[error(transparent)] SerdeError(#[from] serde_json::Error), #[error(transparent)] @@ -102,7 +96,7 @@ pub trait AsyncQueueable: Send { async fn remove_all_scheduled_tasks(&mut self) -> Result; /// Remove a task by its id. - async fn remove_task(&mut self, id: Uuid) -> Result; + async fn remove_task(&mut self, id: &[u8]) -> Result; /// Remove a task by its metadata (struct fields values) async fn remove_task_by_metadata( @@ -114,7 +108,7 @@ pub trait AsyncQueueable: Send { async fn remove_tasks_type(&mut self, task_type: &str) -> Result; /// Retrieve a task from storage by its `id`. - async fn find_task_by_id(&mut self, id: Uuid) -> Result; + async fn find_task_by_id(&mut self, id: &[u8]) -> Result; /// Update the state field of the specified task /// See the `FangTaskState` enum for possible states. 
@@ -156,15 +150,9 @@ pub trait AsyncQueueable: Send { /// #[derive(TypedBuilder, Debug, Clone)] -pub struct AsyncQueue -where - Tls: MakeTlsConnect + Clone + Send + Sync + 'static, - >::Stream: Send + Sync, - >::TlsConnect: Send, - <>::TlsConnect as TlsConnect>::Future: Send, -{ +pub struct AsyncQueue { #[builder(default=None, setter(skip))] - pool: Option>>, + pool: Option, #[builder(setter(into))] uri: String, #[builder(setter(into))] @@ -180,7 +168,10 @@ use tokio::sync::Mutex; static ASYNC_QUEUE_DB_TEST_COUNTER: Mutex = Mutex::const_new(0); #[cfg(test)] -impl AsyncQueue { +use sqlx::Executor; + +#[cfg(test)] +impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test() -> Self { const BASE_URI: &str = "postgres://postgres:postgres@localhost"; @@ -190,22 +181,22 @@ impl AsyncQueue { .build(); let mut new_number = ASYNC_QUEUE_DB_TEST_COUNTER.lock().await; - res.connect(NoTls).await.unwrap(); + res.connect().await.unwrap(); let db_name = format!("async_queue_test_{}", *new_number); *new_number += 1; - let create_query = format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); - let delete_query = format!("DROP DATABASE IF EXISTS {};", db_name); + let create_query: &str = &format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); + let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - let conn = res.pool.as_mut().unwrap().get().await.unwrap(); + let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); log::info!("Deleting database {db_name} ..."); - conn.execute(&delete_query, &[]).await.unwrap(); + conn.execute(delete_query).await.unwrap(); log::info!("Creating database {db_name} ..."); - while let Err(e) = conn.execute(&create_query, &[]).await { - if e.as_db_error().unwrap().message() + while let Err(e) = conn.execute(create_query).await { + if e.as_database_error().unwrap().message() != "source database \"fang\" is being accessed by other users" { panic!("{:?}", e); @@ -219,19 +210,13 @@ impl 
AsyncQueue { res.connected = false; res.pool = None; res.uri = format!("{}/{}", BASE_URI, db_name); - res.connect(NoTls).await.unwrap(); + res.connect().await.unwrap(); res } } -impl AsyncQueue -where - Tls: MakeTlsConnect + Clone + Send + Sync + 'static, - >::Stream: Send + Sync, - >::TlsConnect: Send, - <>::TlsConnect as TlsConnect>::Future: Send, -{ +impl AsyncQueue { /// Check if the connection with db is established pub fn check_if_connection(&self) -> Result<(), AsyncQueueError> { if self.connected { @@ -242,12 +227,12 @@ where } /// Connect to the db if not connected - pub async fn connect(&mut self, tls: Tls) -> Result<(), AsyncQueueError> { - let manager = PostgresConnectionManager::new_from_stringlike(self.uri.clone(), tls)?; + pub async fn connect(&mut self) -> Result<(), AsyncQueueError> { + install_default_drivers(); - let pool = Pool::builder() - .max_size(self.max_pool_size) - .build(manager) + let pool: AnyPool = PoolOptions::new() + .max_connections(self.max_pool_size) + .connect(&self.uri) .await?; self.pool = Some(pool); @@ -256,108 +241,129 @@ where } async fn remove_all_tasks_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, ) -> Result { - Self::execute_query(transaction, REMOVE_ALL_TASK_QUERY, &[], None).await + Ok(sqlx::query(REMOVE_ALL_TASK_QUERY) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) } async fn remove_all_scheduled_tasks_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, ) -> Result { - Self::execute_query( - transaction, - REMOVE_ALL_SCHEDULED_TASK_QUERY, - &[&Utc::now()], - None, - ) - .await + let now = Utc::now(); + let now_str = now.to_rfc3339(); + + Ok(sqlx::query(REMOVE_ALL_SCHEDULED_TASK_QUERY) + .bind(now_str) + .execute(transaction.acquire().await?) + .await? 
+ .rows_affected()) } async fn remove_task_query( - transaction: &mut Transaction<'_>, - id: Uuid, + transaction: &mut Transaction<'_, Any>, + id: &[u8], ) -> Result { - Self::execute_query(transaction, REMOVE_TASK_QUERY, &[&id], Some(1)).await + let result = sqlx::query(REMOVE_TASK_QUERY) + .bind(id) + .execute(transaction.acquire().await?) + .await? + .rows_affected(); + + if result != 1 { + Err(AsyncQueueError::ResultError { + expected: 1, + found: result, + }) + } else { + Ok(result) + } } async fn remove_task_by_metadata_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; let uniq_hash = Self::calculate_hash(metadata.to_string()); - Self::execute_query( - transaction, - REMOVE_TASK_BY_METADATA_QUERY, - &[&uniq_hash], - None, - ) - .await + Ok(sqlx::query(REMOVE_TASK_BY_METADATA_QUERY) + .bind(uniq_hash) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) } async fn remove_tasks_type_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task_type: &str, ) -> Result { - Self::execute_query(transaction, REMOVE_TASKS_TYPE_QUERY, &[&task_type], None).await + Ok(sqlx::query(REMOVE_TASKS_TYPE_QUERY) + .bind(task_type) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) } async fn find_task_by_id_query( - transaction: &mut Transaction<'_>, - id: Uuid, + transaction: &mut Transaction<'_, Any>, + id: &[u8], ) -> Result { - let row: Row = transaction.query_one(FIND_TASK_BY_ID_QUERY, &[&id]).await?; + let task: Task = sqlx::query_as(FIND_TASK_BY_ID_QUERY) + .bind(id) + .fetch_one(transaction.acquire().await?) 
+ .await?; - let task = Self::row_to_task(row); Ok(task) } async fn fail_task_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task: &Task, error_message: &str, ) -> Result { - let updated_at = Utc::now(); - - let row: Row = transaction - .query_one( - FAIL_TASK_QUERY, - &[ - &FangTaskState::Failed, - &error_message, - &updated_at, - &task.id, - ], - ) + let updated_at = Utc::now().to_rfc3339(); + + let failed_task: Task = sqlx::query_as(FAIL_TASK_QUERY) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(&task.id) + .fetch_one(transaction.acquire().await?) .await?; - let failed_task = Self::row_to_task(row); + Ok(failed_task) } async fn schedule_retry_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task: &Task, backoff_seconds: u32, error: &str, ) -> Result { let now = Utc::now(); + let now_str = now.to_rfc3339(); let scheduled_at = now + Duration::seconds(backoff_seconds as i64); + let scheduled_at_str = scheduled_at.to_rfc3339(); let retries = task.retries + 1; - let row: Row = transaction - .query_one( - RETRY_TASK_QUERY, - &[&error, &retries, &scheduled_at, &now, &task.id], - ) + let failed_task: Task = sqlx::query_as(RETRY_TASK_QUERY) + .bind(error) + .bind(retries) + .bind(scheduled_at_str) + .bind(now_str) + .bind(&task.id) + .fetch_one(transaction.acquire().await?) 
.await?; - let failed_task = Self::row_to_task(row); + Ok(failed_task) } async fn fetch_and_touch_task_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task_type: Option, ) -> Result, AsyncQueueError> { let task_type = match task_type { @@ -381,85 +387,90 @@ where } async fn get_task_type_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task_type: &str, ) -> Result { - let row: Row = transaction - .query_one(FETCH_TASK_TYPE_QUERY, &[&task_type, &Utc::now()]) - .await?; + let now = Utc::now(); + let now_str = now.to_rfc3339(); - let task = Self::row_to_task(row); + let task: Task = sqlx::query_as(FETCH_TASK_TYPE_QUERY) + .bind(task_type) + .bind(now_str) + .fetch_one(transaction.acquire().await?) + .await?; Ok(task) } async fn update_task_state_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task: &Task, state: FangTaskState, ) -> Result { let updated_at = Utc::now(); + let updated_at_str = updated_at.to_rfc3339(); + + let state_str: &str = state.into(); - let row: Row = transaction - .query_one(UPDATE_TASK_STATE_QUERY, &[&state, &updated_at, &task.id]) + let task: Task = sqlx::query_as(UPDATE_TASK_STATE_QUERY) + .bind(state_str) + .bind(updated_at_str) + .bind(&task.id) + .fetch_one(transaction.acquire().await?) 
.await?; - let task = Self::row_to_task(row); + Ok(task) } async fn insert_task_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { - let row: Row = transaction - .query_one(INSERT_TASK_QUERY, &[&metadata, &task_type, &scheduled_at]) + let uuid = Uuid::new_v4(); + let bytes: &[u8] = &uuid.to_bytes_le(); + + let metadata_str = metadata.to_string(); + let scheduled_at_str = scheduled_at.to_rfc3339(); + + let task: Task = sqlx::query_as(INSERT_TASK_QUERY) + .bind(bytes) + .bind(metadata_str) + .bind(task_type) + .bind(scheduled_at_str) + .fetch_one(transaction.acquire().await?) .await?; - let task = Self::row_to_task(row); Ok(task) } async fn insert_task_uniq_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { + let uuid = Uuid::new_v4(); + let bytes: &[u8] = &uuid.to_bytes_le(); + let uniq_hash = Self::calculate_hash(metadata.to_string()); - let row: Row = transaction - .query_one( - INSERT_TASK_UNIQ_QUERY, - &[&metadata, &task_type, &uniq_hash, &scheduled_at], - ) - .await?; + let metadata_str = metadata.to_string(); + let scheduled_at_str = scheduled_at.to_rfc3339(); - let task = Self::row_to_task(row); + let task: Task = sqlx::query_as(INSERT_TASK_UNIQ_QUERY) + .bind(bytes) + .bind(metadata_str) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at_str) + .fetch_one(transaction.acquire().await?) 
+ .await?; Ok(task) } - async fn execute_query( - transaction: &mut Transaction<'_>, - query: &str, - params: &[&(dyn ToSql + Sync)], - expected_result_count: Option, - ) -> Result { - let result = transaction.execute(query, params).await?; - - if let Some(expected_result) = expected_result_count { - if result != expected_result { - return Err(AsyncQueueError::ResultError { - expected: expected_result, - found: result, - }); - } - } - Ok(result) - } - async fn insert_task_if_not_exist_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, @@ -480,51 +491,20 @@ where } async fn find_task_by_uniq_hash_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, metadata: &serde_json::Value, ) -> Option { let uniq_hash = Self::calculate_hash(metadata.to_string()); - let result = transaction - .query_one(FIND_TASK_BY_UNIQ_HASH_QUERY, &[&uniq_hash]) - .await; - - match result { - Ok(row) => Some(Self::row_to_task(row)), - Err(_) => None, - } - } - - fn row_to_task(row: Row) -> Task { - let id: Uuid = row.get("id"); - let metadata: serde_json::Value = row.get("metadata"); - - let error_message: Option = row.try_get("error_message").ok(); - - let uniq_hash: Option = row.try_get("uniq_hash").ok(); - let state: FangTaskState = row.get("state"); - let task_type: String = row.get("task_type"); - let retries: i32 = row.get("retries"); - let created_at: DateTime = row.get("created_at"); - let updated_at: DateTime = row.get("updated_at"); - let scheduled_at: DateTime = row.get("scheduled_at"); - - Task::builder() - .id(id) - .metadata(metadata) - .error_message(error_message) - .state(state) - .uniq_hash(uniq_hash) - .task_type(task_type) - .retries(retries) - .created_at(created_at) - .updated_at(updated_at) - .scheduled_at(scheduled_at) - .build() + sqlx::query_as(FIND_TASK_BY_UNIQ_HASH_QUERY) + .bind(uniq_hash) + 
.fetch_one(transaction.acquire().await.ok()?) + .await + .ok() } async fn schedule_task_query( - transaction: &mut Transaction<'_>, + transaction: &mut Transaction<'_, Any>, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; @@ -563,17 +543,10 @@ where } #[async_trait] -impl AsyncQueueable for AsyncQueue -where - Tls: MakeTlsConnect + Clone + Send + Sync + 'static, - >::Stream: Send + Sync, - >::TlsConnect: Send, - <>::TlsConnect as TlsConnect>::Future: Send, -{ - async fn find_task_by_id(&mut self, id: Uuid) -> Result { +impl AsyncQueueable for AsyncQueue { + async fn find_task_by_id(&mut self, id: &[u8]) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let task = Self::find_task_by_id_query(&mut transaction, id).await?; @@ -587,8 +560,7 @@ where task_type: Option, ) -> Result, AsyncQueueError> { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let task = Self::fetch_and_touch_task_query(&mut transaction, task_type).await?; @@ -599,12 +571,10 @@ where async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; - + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let metadata = serde_json::to_value(task)?; - let task: Task = if !task.uniq() { + let task = if !task.uniq() { Self::insert_task_query(&mut transaction, metadata, &task.task_type(), Utc::now()) .await? 
} else { @@ -624,19 +594,18 @@ where async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let task = Self::schedule_task_query(&mut transaction, task).await?; transaction.commit().await?; + Ok(task) } async fn remove_all_tasks(&mut self) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let result = Self::remove_all_tasks_query(&mut transaction).await?; @@ -647,8 +616,7 @@ where async fn remove_all_scheduled_tasks(&mut self) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let result = Self::remove_all_scheduled_tasks_query(&mut transaction).await?; @@ -657,10 +625,9 @@ where Ok(result) } - async fn remove_task(&mut self, id: Uuid) -> Result { + async fn remove_task(&mut self, id: &[u8]) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let result = Self::remove_task_query(&mut transaction, id).await?; @@ -675,8 +642,7 @@ where ) -> Result { if task.uniq() { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let result = Self::remove_task_by_metadata_query(&mut transaction, task).await?; @@ -690,8 +656,7 @@ where async fn 
remove_tasks_type(&mut self, task_type: &str) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let result = Self::remove_tasks_type_query(&mut transaction, task_type).await?; @@ -706,10 +671,10 @@ where state: FangTaskState, ) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let task = Self::update_task_state_query(&mut transaction, task, state).await?; + transaction.commit().await?; Ok(task) @@ -721,10 +686,10 @@ where error_message: &str, ) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let mut transaction = self.pool.as_ref().unwrap().begin().await?; let task = Self::fail_task_query(&mut transaction, task, error_message).await?; + transaction.commit().await?; Ok(task) @@ -737,16 +702,17 @@ where error: &str, ) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; - let task = + let mut transaction = self.pool.as_ref().unwrap().begin().await?; + + let failed_task = Self::schedule_retry_query(&mut transaction, task, backoff_seconds, error).await?; + transaction.commit().await?; - Ok(task) + Ok(failed_task) } } #[cfg(test)] -test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::::test()} +test_asynk_queue! 
{postgres, crate::AsyncQueue, crate::AsyncQueue::test()} diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index f6ea2ece..afd14314 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -98,7 +98,7 @@ macro_rules! test_asynk_queue { let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); - let id = task.id; + let id: &[u8] = &task.id; assert_eq!(Some(1), number); assert_eq!(Some("AsyncTask"), type_task); @@ -121,7 +121,7 @@ macro_rules! test_asynk_queue { let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); - let id = task.id; + let id: &[u8] = &task.id; assert_eq!(Some(1), number); assert_eq!(Some("AsyncTask"), type_task); @@ -251,7 +251,9 @@ macro_rules! test_asynk_queue { let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); - let metadata = task.metadata.as_object().unwrap(); + println!("{:?}", task.metadata); + + let metadata = task.metadata.as_object().expect("metadata F"); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); @@ -260,14 +262,22 @@ macro_rules! 
test_asynk_queue { let task = test.insert_task(&AsyncTask { number: 2 }).await.unwrap(); - let metadata = task.metadata.as_object().unwrap(); + let metadata = task + .metadata + .as_object() + .expect("borrado mi amigo salio mal"); + let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); assert_eq!(Some(2), number); assert_eq!(Some("AsyncTask"), type_task); - let result = test.remove_tasks_type("mytype").await.unwrap(); + let result = test + .remove_tasks_type("mytype") + .await + .expect("el numero salio bad"); + assert_eq!(0, result); let result = test.remove_tasks_type("common").await.unwrap(); diff --git a/fang/src/asynk/async_runnable.rs b/fang/src/asynk/async_runnable.rs index bde2bed3..380c05ca 100644 --- a/fang/src/asynk/async_runnable.rs +++ b/fang/src/asynk/async_runnable.rs @@ -3,9 +3,8 @@ use crate::asynk::async_queue::AsyncQueueable; use crate::FangError; use crate::Scheduled; use async_trait::async_trait; -use bb8_postgres::bb8::RunError; -use bb8_postgres::tokio_postgres::Error as TokioPostgresError; use serde_json::Error as SerdeError; +use sqlx::Error as SqlXError; const COMMON_TYPE: &str = "common"; pub const RETRIES_NUMBER: i32 = 20; @@ -19,18 +18,11 @@ impl From for FangError { } } -impl From for FangError { - fn from(error: TokioPostgresError) -> Self { - Self::from(AsyncQueueError::PgError(error)) +impl From for FangError { + fn from(error: SqlXError) -> Self { + Self::from(AsyncQueueError::SqlXError(error)) } } - -impl From> for FangError { - fn from(error: RunError) -> Self { - Self::from(AsyncQueueError::PoolError(error)) - } -} - impl From for FangError { fn from(error: SerdeError) -> Self { Self::from(AsyncQueueError::SerdeError(error)) @@ -40,7 +32,7 @@ impl From for FangError { /// Implement this trait to run your custom tasks. #[typetag::serde(tag = "type")] #[async_trait] -pub trait AsyncRunnable: Send + Sync { +pub trait AsyncRunnable: Send + Sync + Send { /// Execute the task. 
This method should define its logic async fn run(&self, client: &mut dyn AsyncQueueable) -> Result<(), FangError>; diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index 79698546..d74d3f6e 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -68,11 +68,11 @@ where } }, RetentionMode::RemoveAll => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } RetentionMode::RemoveFinished => match result { Ok(_) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } Err(error) => { self.queue.fail_task(task, &error.description).await?; @@ -187,15 +187,15 @@ impl<'a> AsyncWorkerTest<'a> { }, RetentionMode::RemoveAll => match result { Ok(_) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } Err(_error) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } }, RetentionMode::RemoveFinished => match result { Ok(_) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } Err(error) => { self.queue.fail_task(task, &error.description).await?; @@ -260,7 +260,6 @@ mod async_worker_tests { use crate::RetentionMode; use crate::Scheduled; use async_trait::async_trait; - use bb8_postgres::tokio_postgres::NoTls; use chrono::Duration; use chrono::Utc; use serde::{Deserialize, Serialize}; @@ -387,12 +386,12 @@ mod async_worker_tests { #[tokio::test] async fn execute_and_finishes_task() { - let mut test = AsyncQueue::::test().await; + let mut test = AsyncQueue::test().await; let actual_task = WorkerAsyncTask { number: 1 }; let task = insert_task(&mut test, &actual_task).await; - let id = task.id; + let id: &[u8] = &task.id; let mut worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) @@ -407,7 +406,7 @@ mod async_worker_tests { #[tokio::test] async fn schedule_task_test() { - let mut test = AsyncQueue::::test().await; + let 
mut test = AsyncQueue::test().await; let actual_task = WorkerAsyncTaskSchedule { number: 1 }; @@ -422,7 +421,7 @@ mod async_worker_tests { worker.run_tasks_until_none().await.unwrap(); - let task = worker.queue.find_task_by_id(id).await.unwrap(); + let task = worker.queue.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::New, task.state); @@ -431,14 +430,14 @@ mod async_worker_tests { worker.run_tasks_until_none().await.unwrap(); - let task = test.find_task_by_id(id).await.unwrap(); + let task = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Finished, task.state); } #[tokio::test] async fn retries_task_test() { - let mut test = AsyncQueue::::test().await; + let mut test = AsyncQueue::test().await; let actual_task = AsyncRetryTask {}; @@ -453,7 +452,7 @@ mod async_worker_tests { worker.run_tasks_until_none().await.unwrap(); - let task = worker.queue.find_task_by_id(id).await.unwrap(); + let task = worker.queue.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Retried, task.state); @@ -462,7 +461,7 @@ mod async_worker_tests { tokio::time::sleep(core::time::Duration::from_secs(5)).await; worker.run_tasks_until_none().await.unwrap(); - let task = worker.queue.find_task_by_id(id).await.unwrap(); + let task = worker.queue.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Retried, task.state); @@ -471,7 +470,7 @@ mod async_worker_tests { tokio::time::sleep(core::time::Duration::from_secs(10)).await; worker.run_tasks_until_none().await.unwrap(); - let task = test.find_task_by_id(id).await.unwrap(); + let task = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Failed, task.state); assert_eq!("Failed".to_string(), task.error_message.unwrap()); @@ -479,11 +478,11 @@ mod async_worker_tests { #[tokio::test] async fn saves_error_for_failed_task() { - let mut test = 
AsyncQueue::::test().await; + let mut test = AsyncQueue::test().await; let failed_task = AsyncFailedTask { number: 1 }; let task = insert_task(&mut test, &failed_task).await; - let id = task.id; + let id: &[u8] = &task.id; let mut worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) @@ -503,7 +502,7 @@ mod async_worker_tests { #[tokio::test] async fn executes_task_only_of_specific_type() { - let mut test = AsyncQueue::::test().await; + let mut test = AsyncQueue::test().await; let task1 = insert_task(&mut test, &AsyncTaskType1 {}).await; let task12 = insert_task(&mut test, &AsyncTaskType1 {}).await; @@ -520,9 +519,9 @@ mod async_worker_tests { .build(); worker.run_tasks_until_none().await.unwrap(); - let task1 = test.find_task_by_id(id1).await.unwrap(); - let task12 = test.find_task_by_id(id12).await.unwrap(); - let task2 = test.find_task_by_id(id2).await.unwrap(); + let task1 = test.find_task_by_id(&id1).await.unwrap(); + let task12 = test.find_task_by_id(&id12).await.unwrap(); + let task2 = test.find_task_by_id(&id2).await.unwrap(); assert_eq!(id1, task1.id); assert_eq!(id12, task12.id); @@ -534,7 +533,7 @@ mod async_worker_tests { #[tokio::test] async fn remove_when_finished() { - let mut test = AsyncQueue::::test().await; + let mut test = AsyncQueue::test().await; let task1 = insert_task(&mut test, &AsyncTaskType1 {}).await; let task12 = insert_task(&mut test, &AsyncTaskType1 {}).await; @@ -564,13 +563,13 @@ mod async_worker_tests { assert_eq!(id2, task2.id); } - async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { + async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { test.insert_task(task).await.unwrap() } #[tokio::test] async fn no_schedule_until_run() { - let mut test = AsyncQueue::::test().await; + let mut test = AsyncQueue::test().await; let _task_1 = test .schedule_task(&WorkerAsyncTaskScheduled {}) diff --git a/fang/src/asynk/queries/fail_task.sql 
b/fang/src/asynk/queries/fail_task.sql index 17192868..319449e4 100644 --- a/fang/src/asynk/queries/fail_task.sql +++ b/fang/src/asynk/queries/fail_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1 , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING * +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3::timestamptz WHERE id = $4 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/fetch_task_type.sql b/fang/src/asynk/queries/fetch_task_type.sql index e0558202..cafaa556 100644 --- a/fang/src/asynk/queries/fetch_task_type.sql +++ b/fang/src/asynk/queries/fetch_task_type.sql @@ -1 +1 @@ -SELECT * FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2::timestamptz >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries/find_task_by_id.sql b/fang/src/asynk/queries/find_task_by_id.sql index 608166f5..1a26b723 100644 --- a/fang/src/asynk/queries/find_task_by_id.sql +++ b/fang/src/asynk/queries/find_task_by_id.sql @@ -1 +1 @@ -SELECT * FROM fang_tasks WHERE id = $1 +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE id = $1 diff --git a/fang/src/asynk/queries/find_task_by_uniq_hash.sql b/fang/src/asynk/queries/find_task_by_uniq_hash.sql index cb53f45c..49f26982 100644 --- a/fang/src/asynk/queries/find_task_by_uniq_hash.sql +++ 
b/fang/src/asynk/queries/find_task_by_uniq_hash.sql @@ -1 +1 @@ -SELECT * FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries/insert_task.sql b/fang/src/asynk/queries/insert_task.sql index 514d921a..920af791 100644 --- a/fang/src/asynk/queries/insert_task.sql +++ b/fang/src/asynk/queries/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3) RETURNING * +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, to_json($2), $3, $4::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/insert_task_uniq.sql b/fang/src/asynk/queries/insert_task_uniq.sql index 08173836..5cccc807 100644 --- a/fang/src/asynk/queries/insert_task_uniq.sql +++ b/fang/src/asynk/queries/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2 , $3, $4) RETURNING * +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, to_json($2) , $3, $4, $5::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries/remove_all_scheduled_tasks.sql index 61a5b6b5..c102d9c7 100644 --- a/fang/src/asynk/queries/remove_all_scheduled_tasks.sql +++ b/fang/src/asynk/queries/remove_all_scheduled_tasks.sql @@ -1 +1 @@ -DELETE FROM "fang_tasks" WHERE scheduled_at > $1 +DELETE FROM "fang_tasks" 
WHERE scheduled_at > $1::timestamptz diff --git a/fang/src/asynk/queries/retry_task.sql b/fang/src/asynk/queries/retry_task.sql index f26267cd..24fad7ea 100644 --- a/fang/src/asynk/queries/retry_task.sql +++ b/fang/src/asynk/queries/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5 RETURNING * +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3::timestamptz, "updated_at" = $4::timestamptz WHERE id = $5 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/update_task_state.sql b/fang/src/asynk/queries/update_task_state.sql index e2e2d94d..eef72eb5 100644 --- a/fang/src/asynk/queries/update_task_state.sql +++ b/fang/src/asynk/queries/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1 , "updated_at" = $2 WHERE id = $3 RETURNING * +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2::timestamptz WHERE id = $3 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/blocking/postgres_schema.rs b/fang/src/blocking/postgres_schema.rs index 15b051c7..b0abc78d 100644 --- a/fang/src/blocking/postgres_schema.rs +++ b/fang/src/blocking/postgres_schema.rs @@ -11,13 +11,12 @@ diesel::table! 
{ use super::sql_types::FangTaskState; fang_tasks (id) { - id -> Uuid, + id -> Bytea, metadata -> Jsonb, error_message -> Nullable, state -> FangTaskState, - task_type -> Varchar, - #[max_length = 64] - uniq_hash -> Nullable, + task_type -> Text, + uniq_hash -> Nullable, retries -> Int4, scheduled_at -> Timestamptz, created_at -> Timestamptz, diff --git a/fang/src/blocking/queue.rs b/fang/src/blocking/queue.rs index 3716833f..86bf1f6f 100644 --- a/fang/src/blocking/queue.rs +++ b/fang/src/blocking/queue.rs @@ -35,6 +35,8 @@ pub type PoolConnection = PooledConnection>; #[derive(Insertable, Debug, Eq, PartialEq, Clone, TypedBuilder)] #[diesel(table_name = fang_tasks)] pub struct NewTask { + #[builder(setter(into))] + id: Vec, #[builder(setter(into))] metadata: serde_json::Value, #[builder(setter(into))] @@ -85,14 +87,14 @@ pub trait Queueable { fn remove_tasks_of_type(&self, task_type: &str) -> Result; /// Remove a task by its id. - fn remove_task(&self, id: Uuid) -> Result; + fn remove_task(&self, id: &[u8]) -> Result; /// To use this function task has to be uniq. uniq() has to return true. /// If task is not uniq this function will not do anything. /// Remove a task by its metadata (struct fields values) fn remove_task_by_metadata(&self, task: &dyn Runnable) -> Result; - fn find_task_by_id(&self, id: Uuid) -> Option; + fn find_task_by_id(&self, id: &[u8]) -> Option; /// Update the state field of the specified task /// See the `FangTaskState` enum for possible states. 
@@ -175,7 +177,7 @@ impl Queueable for Queue { Self::remove_tasks_of_type_query(&mut connection, task_type) } - fn remove_task(&self, id: Uuid) -> Result { + fn remove_task(&self, id: &[u8]) -> Result { let mut connection = self.get_connection()?; Self::remove_task_query(&mut connection, id) @@ -205,7 +207,7 @@ impl Queueable for Queue { Self::fail_task_query(&mut connection, task, error) } - fn find_task_by_id(&self, id: Uuid) -> Option { + fn find_task_by_id(&self, id: &[u8]) -> Option { let mut connection = self.get_connection().unwrap(); Self::find_task_by_id_query(&mut connection, id) @@ -285,7 +287,11 @@ impl Queue { scheduled_at: DateTime, ) -> Result { if !params.uniq() { + let uuid = Uuid::new_v4(); + let id: Vec = uuid.to_bytes_le().to_vec(); + let new_task = NewTask::builder() + .id(id) .scheduled_at(scheduled_at) .uniq_hash(None) .task_type(params.task_type()) @@ -303,7 +309,11 @@ impl Queue { match Self::find_task_by_uniq_hash_query(connection, &uniq_hash) { Some(task) => Ok(task), None => { + let uuid = Uuid::new_v4(); + let id: Vec = uuid.to_bytes_le().to_vec(); + let new_task = NewTask::builder() + .id(id) .scheduled_at(scheduled_at) .uniq_hash(Some(uniq_hash)) .task_type(params.task_type()) @@ -344,7 +354,7 @@ impl Queue { }) } - pub fn find_task_by_id_query(connection: &mut PgConnection, id: Uuid) -> Option { + pub fn find_task_by_id_query(connection: &mut PgConnection, id: &[u8]) -> Option { fang_tasks::table .filter(fang_tasks::id.eq(id)) .first::(connection) @@ -385,7 +395,10 @@ impl Queue { Ok(diesel::delete(query).execute(connection)?) } - pub fn remove_task_query(connection: &mut PgConnection, id: Uuid) -> Result { + pub fn remove_task_query( + connection: &mut PgConnection, + id: &[u8], + ) -> Result { let query = fang_tasks::table.filter(fang_tasks::id.eq(id)); Ok(diesel::delete(query).execute(connection)?) 
diff --git a/fang/src/blocking/queue/queue_tests.rs b/fang/src/blocking/queue/queue_tests.rs index 7529af23..8bf8c1ee 100644 --- a/fang/src/blocking/queue/queue_tests.rs +++ b/fang/src/blocking/queue/queue_tests.rs @@ -269,8 +269,8 @@ macro_rules! test_queue { let result = queue.remove_all_tasks().unwrap(); assert_eq!(2, result); - assert_eq!(None, queue.find_task_by_id(task1.id)); - assert_eq!(None, queue.find_task_by_id(task2.id)); + assert_eq!(None, queue.find_task_by_id(&task1.id)); + assert_eq!(None, queue.find_task_by_id(&task2.id)); } #[test] @@ -283,13 +283,13 @@ macro_rules! test_queue { let task1 = queue.insert_task(&task1).unwrap(); let task2 = queue.insert_task(&task2).unwrap(); - assert!(queue.find_task_by_id(task1.id).is_some()); - assert!(queue.find_task_by_id(task2.id).is_some()); + assert!(queue.find_task_by_id(&task1.id).is_some()); + assert!(queue.find_task_by_id(&task2.id).is_some()); - queue.remove_task(task1.id).unwrap(); + queue.remove_task(&task1.id).unwrap(); - assert!(queue.find_task_by_id(task1.id).is_none()); - assert!(queue.find_task_by_id(task2.id).is_some()); + assert!(queue.find_task_by_id(&task1.id).is_none()); + assert!(queue.find_task_by_id(&task2.id).is_some()); } #[test] @@ -302,13 +302,13 @@ macro_rules! test_queue { let task1 = queue.insert_task(&task1).unwrap(); let task2 = queue.insert_task(&task2).unwrap(); - assert!(queue.find_task_by_id(task1.id).is_some()); - assert!(queue.find_task_by_id(task2.id).is_some()); + assert!(queue.find_task_by_id(&task1.id).is_some()); + assert!(queue.find_task_by_id(&task2.id).is_some()); queue.remove_tasks_of_type("weirdo").unwrap(); - assert!(queue.find_task_by_id(task1.id).is_some()); - assert!(queue.find_task_by_id(task2.id).is_none()); + assert!(queue.find_task_by_id(&task1.id).is_some()); + assert!(queue.find_task_by_id(&task2.id).is_none()); } #[test] @@ -323,15 +323,15 @@ macro_rules! 
test_queue { let task2 = queue.insert_task(&m_task2).unwrap(); let task3 = queue.insert_task(&m_task3).unwrap(); - assert!(queue.find_task_by_id(task1.id).is_some()); - assert!(queue.find_task_by_id(task2.id).is_some()); - assert!(queue.find_task_by_id(task3.id).is_some()); + assert!(queue.find_task_by_id(&task1.id).is_some()); + assert!(queue.find_task_by_id(&task2.id).is_some()); + assert!(queue.find_task_by_id(&task3.id).is_some()); queue.remove_task_by_metadata(&m_task1).unwrap(); - assert!(queue.find_task_by_id(task1.id).is_none()); - assert!(queue.find_task_by_id(task2.id).is_some()); - assert!(queue.find_task_by_id(task3.id).is_some()); + assert!(queue.find_task_by_id(&task1.id).is_none()); + assert!(queue.find_task_by_id(&task2.id).is_some()); + assert!(queue.find_task_by_id(&task3.id).is_some()); } } }; diff --git a/fang/src/blocking/worker.rs b/fang/src/blocking/worker.rs index afa2269d..f2dc33a0 100644 --- a/fang/src/blocking/worker.rs +++ b/fang/src/blocking/worker.rs @@ -136,12 +136,12 @@ where } RetentionMode::RemoveAll => { - self.queue.remove_task(task.id)?; + self.queue.remove_task(&task.id)?; } RetentionMode::RemoveFinished => match result { Ok(_) => { - self.queue.remove_task(task.id)?; + self.queue.remove_task(&task.id)?; } Err(error) => { self.queue.fail_task(task, &error.description)?; @@ -305,7 +305,7 @@ mod worker_tests { // this operation commits and thats why need to commit this test worker.run(&task).unwrap(); - let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap(); + let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap(); assert_eq!(FangTaskState::Finished, found_task.state); @@ -340,10 +340,10 @@ mod worker_tests { std::thread::sleep(std::time::Duration::from_millis(1000)); - let found_task1 = Queue::find_task_by_id_query(&mut pooled_connection, task1.id).unwrap(); + let found_task1 = Queue::find_task_by_id_query(&mut pooled_connection, &task1.id).unwrap(); 
assert_eq!(FangTaskState::Finished, found_task1.state); - let found_task2 = Queue::find_task_by_id_query(&mut pooled_connection, task2.id).unwrap(); + let found_task2 = Queue::find_task_by_id_query(&mut pooled_connection, &task2.id).unwrap(); assert_eq!(FangTaskState::New, found_task2.state); Queue::remove_tasks_of_type_query(&mut pooled_connection, "type1").unwrap(); @@ -373,7 +373,7 @@ mod worker_tests { worker.run(&task).unwrap(); - let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap(); + let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap(); assert_eq!(FangTaskState::Failed, found_task.state); assert_eq!( @@ -409,7 +409,7 @@ mod worker_tests { std::thread::sleep(std::time::Duration::from_millis(1000)); - let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap(); + let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap(); assert_eq!(FangTaskState::Retried, found_task.state); assert_eq!(1, found_task.retries); @@ -420,7 +420,7 @@ mod worker_tests { worker.run_tasks_until_none().unwrap(); - let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap(); + let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap(); assert_eq!(FangTaskState::Failed, found_task.state); assert_eq!(2, found_task.retries); diff --git a/fang/src/lib.rs b/fang/src/lib.rs index e6abb131..b658826d 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -2,15 +2,14 @@ #[cfg(feature = "blocking")] use diesel::{Identifiable, Queryable}; +use sqlx::any::AnyRow; +use sqlx::FromRow; +use sqlx::Row; use std::time::Duration; use thiserror::Error; use typed_builder::TypedBuilder; -use uuid::Uuid; - -#[cfg(feature = "asynk")] -use postgres_types::{FromSql, ToSql}; -/// Represents a schedule for scheduled tasks. /// +/// Represents a schedule for scheduled tasks. 
/// It's used in the [`AsyncRunnable::cron`] and [`Runnable::cron`] #[derive(Debug, Clone)] pub enum Scheduled { @@ -104,40 +103,58 @@ pub struct FangError { /// Possible states of the task #[derive(Debug, Eq, PartialEq, Clone)] #[cfg_attr(feature = "blocking", derive(diesel_derive_enum::DbEnum))] -#[cfg_attr(feature = "asynk", derive(ToSql, FromSql, Default))] -#[cfg_attr(feature = "asynk", postgres(name = "fang_task_state"))] #[cfg_attr( feature = "blocking", ExistingTypePath = "crate::postgres_schema::sql_types::FangTaskState" )] pub enum FangTaskState { /// The task is ready to be executed - #[cfg_attr(feature = "asynk", postgres(name = "new"))] - #[cfg_attr(feature = "asynk", default)] New, /// The task is being executed. /// /// The task may stay in this state forever /// if an unexpected error happened - #[cfg_attr(feature = "asynk", postgres(name = "in_progress"))] InProgress, /// The task failed - #[cfg_attr(feature = "asynk", postgres(name = "failed"))] Failed, /// The task finished successfully - #[cfg_attr(feature = "asynk", postgres(name = "finished"))] Finished, /// The task is being retried. 
It means it failed but it's scheduled to be executed again - #[cfg_attr(feature = "asynk", postgres(name = "retried"))] Retried, } +impl> From for FangTaskState { + fn from(str: S) -> Self { + let str = str.as_ref(); + match str { + "new" => FangTaskState::New, + "in_progress" => FangTaskState::InProgress, + "failed" => FangTaskState::Failed, + "finished" => FangTaskState::Finished, + "retried" => FangTaskState::Retried, + _ => unreachable!(), + } + } +} + +impl From for &str { + fn from(state: FangTaskState) -> Self { + match state { + FangTaskState::New => "new", + FangTaskState::InProgress => "in_progress", + FangTaskState::Failed => "failed", + FangTaskState::Finished => "finished", + FangTaskState::Retried => "retried", + } + } +} + #[derive(Debug, Eq, PartialEq, Clone, TypedBuilder)] #[cfg_attr(feature = "blocking", derive(Queryable, Identifiable))] -#[cfg_attr(feature = "blocking", diesel(table_name = fang_tasks))] +#[diesel(table_name = fang_tasks)] pub struct Task { #[builder(setter(into))] - pub id: Uuid, + pub id: Vec, #[builder(setter(into))] pub metadata: serde_json::Value, #[builder(setter(into))] @@ -158,6 +175,68 @@ pub struct Task { pub updated_at: DateTime, } +#[cfg(feature = "asynk")] +impl<'a> FromRow<'a, AnyRow> for Task { + fn from_row(row: &'a AnyRow) -> Result { + let id: Vec = row.get("id"); + + let raw: &str = row.get("metadata"); // will work if database cast json to string + let raw = raw.replace("\\", ""); + + let mut chars = raw.chars(); + chars.next(); + chars.next_back(); + let raw = chars.as_str(); + + let metadata: serde_json::Value = serde_json::from_str(raw).unwrap(); + + // This should be changed when issue https://github.com/launchbadge/sqlx/issues/2416 is fixed + let error_message: Option = row.try_get("error_message").ok(); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let state: FangTaskState = state_str.into(); + + let task_type: String = row.get("task_type"); + + // This 
should be changed when issue https://github.com/launchbadge/sqlx/issues/2416 is fixed + let uniq_hash: Option = row.try_get("uniq_hash").ok(); + + let retries: i32 = row.get("retries"); + + let scheduled_at_str: &str = row.get("scheduled_at"); + + let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let created_at_str: &str = row.get("created_at"); + + let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let updated_at_str: &str = row.get("updated_at"); + + let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + .created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + #[doc(hidden)] #[cfg(feature = "blocking")] extern crate diesel; From 1a09371bd3245b61ef81d71ec72a75618e96d741 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Sat, 26 Aug 2023 22:43:19 +0200 Subject: [PATCH 02/90] fix clippy xd --- fang/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fang/src/lib.rs b/fang/src/lib.rs index b658826d..061db51f 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -181,7 +181,7 @@ impl<'a> FromRow<'a, AnyRow> for Task { let id: Vec = row.get("id"); let raw: &str = row.get("metadata"); // will work if database cast json to string - let raw = raw.replace("\\", ""); + let raw = raw.replace('\\', ""); let mut chars = raw.chars(); chars.next(); From cbe2d9945edd836c2caf1a1eb09cee2b6f30bb58 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 28 Aug 2023 21:41:08 +0200 Subject: [PATCH 03/90] finally sqlite almost work, need help to debug something --- .gitignore | 1 + fang/Cargo.toml | 38 +- 
.../asynk/simple_async_worker/Cargo.toml | 2 +- .../asynk/simple_cron_async_worker/Cargo.toml | 2 +- .../up.sql | 10 +- fang/src/asynk/async_queue.rs | 375 +++++++++++++++--- .../asynk/async_queue/async_queue_tests.rs | 15 +- fang/src/asynk/async_worker.rs | 14 +- fang/src/asynk/queries/insert_task.sql | 1 - fang/src/asynk/queries/insert_task_uniq.sql | 1 - .../fail_task.sql | 0 .../fetch_task_type.sql | 0 .../find_task_by_id.sql | 0 .../find_task_by_uniq_hash.sql | 0 .../asynk/queries_postgres/insert_task.sql | 1 + .../queries_postgres/insert_task_uniq.sql | 1 + .../remove_all_scheduled_tasks.sql | 0 .../remove_all_tasks.sql | 0 .../remove_task.sql | 0 .../remove_task_by_metadata.sql | 0 .../remove_tasks_type.sql | 0 .../retry_task.sql | 0 .../update_task_state.sql | 0 fang/src/asynk/queries_sqlite/fail_task.sql | 1 + .../asynk/queries_sqlite/fetch_task_type.sql | 1 + .../asynk/queries_sqlite/find_task_by_id.sql | 1 + .../queries_sqlite/find_task_by_uniq_hash.sql | 1 + fang/src/asynk/queries_sqlite/insert_task.sql | 1 + .../asynk/queries_sqlite/insert_task_uniq.sql | 1 + .../remove_all_scheduled_tasks.sql | 1 + .../asynk/queries_sqlite/remove_all_tasks.sql | 1 + fang/src/asynk/queries_sqlite/remove_task.sql | 1 + .../remove_task_by_metadata.sql | 1 + .../queries_sqlite/remove_tasks_type.sql | 1 + fang/src/asynk/queries_sqlite/retry_task.sql | 1 + .../queries_sqlite/update_task_state.sql | 1 + fang/src/blocking/sqlite_schema.rs | 8 +- fang/src/lib.rs | 43 +- 38 files changed, 390 insertions(+), 135 deletions(-) delete mode 100644 fang/src/asynk/queries/insert_task.sql delete mode 100644 fang/src/asynk/queries/insert_task_uniq.sql rename fang/src/asynk/{queries => queries_postgres}/fail_task.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/fetch_task_type.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/find_task_by_id.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/find_task_by_uniq_hash.sql (100%) create mode 100644 
fang/src/asynk/queries_postgres/insert_task.sql create mode 100644 fang/src/asynk/queries_postgres/insert_task_uniq.sql rename fang/src/asynk/{queries => queries_postgres}/remove_all_scheduled_tasks.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/remove_all_tasks.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/remove_task.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/remove_task_by_metadata.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/remove_tasks_type.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/retry_task.sql (100%) rename fang/src/asynk/{queries => queries_postgres}/update_task_state.sql (100%) create mode 100644 fang/src/asynk/queries_sqlite/fail_task.sql create mode 100644 fang/src/asynk/queries_sqlite/fetch_task_type.sql create mode 100644 fang/src/asynk/queries_sqlite/find_task_by_id.sql create mode 100644 fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql create mode 100644 fang/src/asynk/queries_sqlite/insert_task.sql create mode 100644 fang/src/asynk/queries_sqlite/insert_task_uniq.sql create mode 100644 fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql create mode 100644 fang/src/asynk/queries_sqlite/remove_all_tasks.sql create mode 100644 fang/src/asynk/queries_sqlite/remove_task.sql create mode 100644 fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql create mode 100644 fang/src/asynk/queries_sqlite/remove_tasks_type.sql create mode 100644 fang/src/asynk/queries_sqlite/retry_task.sql create mode 100644 fang/src/asynk/queries_sqlite/update_task_state.sql diff --git a/.gitignore b/.gitignore index 57b9c821..ad8e44c8 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ src/schema.rs docs/content/docs/CHANGELOG.md docs/content/docs/README.md fang.db +tests_sqlite/ diff --git a/fang/Cargo.toml b/fang/Cargo.toml index fc3a37cd..65fdb66f 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -15,22 +15,28 @@ rust-version = "1.62" doctest = false 
[features] -default = ["blocking", "asynk", "derive-error", "postgres", "mysql" , "sqlite", "migrations_postgres", "migrations_sqlite" , "migrations_mysql"] -blocking = ["dep:diesel", "dep:diesel-derive-enum", "dep:dotenvy", "diesel?/chrono" , "diesel?/serde_json" , "diesel?/uuid"] -asynk = ["dep:bb8-postgres", "dep:postgres-types", "dep:tokio", "dep:async-trait", "dep:async-recursion"] +default = ["blocking", "asynk-sqlx", "derive-error", "blocking-postgres", "blocking-mysql" , "blocking-sqlite", "migrations-postgres", "migrations-sqlite" , "migrations-mysql"] +asynk-postgres = ["asynk-sqlx" , "sqlx?/postgres"] +asynk-sqlite = ["asynk-sqlx" , "sqlx?/sqlite"] +asynk-mysql = ["asynk-sqlx" , "sqlx?/mysql"] +asynk-sqlx = ["asynk" , "dep:sqlx"] +asynk = ["dep:tokio", "dep:async-trait", "dep:async-recursion" ] derive-error = ["dep:fang-derive-error"] -postgres = ["diesel?/postgres" , "diesel?/serde_json", "diesel?/chrono" , "diesel?/uuid" , "diesel?/r2d2"] -sqlite = ["diesel?/sqlite" , "diesel?/serde_json", "diesel?/chrono" , "diesel?/uuid" , "diesel?/r2d2"] -mysql = ["diesel?/mysql" , "diesel?/serde_json", "diesel?/chrono" , "diesel?/uuid" , "diesel?/r2d2"] -migrations_postgres = ["migrations"] -migrations_sqlite = ["migrations"] -migrations_mysql = ["migrations"] +blocking = ["dep:diesel", "dep:diesel-derive-enum", "dep:dotenvy", "diesel?/chrono" , "diesel?/serde_json" , "diesel?/uuid", "diesel?/r2d2"] +blocking-postgres = [ "blocking", "diesel?/postgres"] +blocking-sqlite = ["blocking", "diesel?/sqlite" ] +blocking-mysql = [ "blocking", "diesel?/mysql"] +migrations-postgres = ["migrations"] +migrations-sqlite = ["migrations"] +migrations-mysql = ["migrations"] migrations = ["dep:diesel_migrations"] [dev-dependencies] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} +sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio", "postgres", 
"sqlite", "mysql"]} + [dependencies] cron = "0.12" @@ -47,7 +53,9 @@ typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , optional = true} # sqlx with no TLS, if you want TLS you must to get feature "tls-native-tls" or "tls-rustls" -sqlx = {version = "0.7", features = ["any" , "macros" , "json" , "uuid" , "chrono" , "runtime-tokio", "postgres", "sqlite", "mysql"] } +#sqlx = {version = "0.7", features = ["any" , "macros" , "json" , "uuid" , "chrono" , "runtime-tokio", "postgres", "sqlite", "mysql"] } +# https://github.com/launchbadge/sqlx/issues/2416 is fixed in pxp9's fork +sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio"] , optional = true} [dependencies.diesel] version = "2.1" @@ -63,16 +71,6 @@ optional = true version = "0.15" optional = true -[dependencies.bb8-postgres] -version = "0.8" -features = ["with-serde_json-1" , "with-uuid-1" , "with-chrono-0_4"] -optional = true - -[dependencies.postgres-types] -version = "0.X.X" -features = ["derive"] -optional = true - [dependencies.tokio] version = "1.25" features = ["rt", "time", "macros"] diff --git a/fang/fang_examples/asynk/simple_async_worker/Cargo.toml b/fang/fang_examples/asynk/simple_async_worker/Cargo.toml index 5e9d2446..54c4a3db 100644 --- a/fang/fang_examples/asynk/simple_async_worker/Cargo.toml +++ b/fang/fang_examples/asynk/simple_async_worker/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -fang = { path = "../../../" , features = ["asynk", "postgres"]} +fang = { path = "../../../" , features = ["asynk-postgres", "migrations-postgres"]} env_logger = "0.9.0" log = "0.4.0" dotenvy = "0.15" diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml b/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml index cad5f651..7a6946ba 100644 --- 
a/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml +++ b/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -fang = { path = "../../../" , features = ["asynk"]} +fang = { path = "../../../" , features = ["asynk-postgres"]} env_logger = "0.9.0" log = "0.4.0" dotenvy = "0.15" diff --git a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index afc60e3e..a3e39394 100644 --- a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -4,8 +4,8 @@ -- docker exec -ti mysql mysql -u root -pfang -P 3360 fang -e "$(catn fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql)" CREATE TABLE fang_tasks ( - id TEXT CHECK (LENGTH(id) = 36) NOT NULL PRIMARY KEY, -- UUID generated inside the language - -- why uuid is a text ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite + id BLOB NOT NULL PRIMARY KEY, -- UUID generated inside the language + -- why uuid is a blob ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite metadata TEXT NOT NULL, -- why metadata is text ? https://stackoverflow.com/questions/16603621/how-to-store-json-object-in-sqlite-database#16603687 error_message TEXT, @@ -16,10 +16,10 @@ CREATE TABLE fang_tasks ( retries INTEGER NOT NULL DEFAULT 0, -- The datetime() function returns the date and time as text in this formats: YYYY-MM-DD HH:MM:SS. -- https://www.sqlite.org/lang_datefunc.html - scheduled_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + scheduled_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00'), -- why timestamps are texts ? 
https://www.sqlite.org/datatype3.html - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + created_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00'), + updated_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00') ); CREATE INDEX fang_tasks_state_index ON fang_tasks(state); diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index d3ea180f..dba32e92 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -15,7 +15,6 @@ use cron::Schedule; use sha2::{Digest, Sha256}; use sqlx::any::install_default_drivers; use sqlx::pool::PoolOptions; -use sqlx::types::Uuid; use sqlx::Acquire; use sqlx::Any; use sqlx::AnyPool; @@ -23,24 +22,46 @@ use sqlx::Transaction; use std::str::FromStr; use thiserror::Error; use typed_builder::TypedBuilder; +use uuid::Uuid; #[cfg(test)] use self::async_queue_tests::test_asynk_queue; -const INSERT_TASK_QUERY: &str = include_str!("queries/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY: &str = include_str!("queries/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY: &str = include_str!("queries/update_task_state.sql"); -const FAIL_TASK_QUERY: &str = include_str!("queries/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY: &str = include_str!("queries/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY: &str = - include_str!("queries/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY: &str = include_str!("queries/remove_task.sql"); -const REMOVE_TASK_BY_METADATA_QUERY: &str = include_str!("queries/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY: &str = include_str!("queries/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY: &str = include_str!("queries/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY: &str = include_str!("queries/find_task_by_uniq_hash.sql"); -const FIND_TASK_BY_ID_QUERY: &str = include_str!("queries/find_task_by_id.sql"); -const RETRY_TASK_QUERY: &str = 
include_str!("queries/retry_task.sql"); +const INSERT_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_POSTGRES: &str = + include_str!("queries_postgres/update_task_state.sql"); +const FAIL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES: &str = + include_str!("queries_postgres/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_POSTGRES: &str = + include_str!("queries_postgres/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_POSTGRES: &str = + include_str!("queries_postgres/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_POSTGRES: &str = include_str!("queries_postgres/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES: &str = + include_str!("queries_postgres/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_POSTGRES: &str = include_str!("queries_postgres/find_task_by_id.sql"); +const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/retry_task.sql"); + +const INSERT_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_SQLITE: &str = include_str!("queries_sqlite/update_task_state.sql"); +const FAIL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE: &str = + include_str!("queries_sqlite/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_SQLITE: &str 
= include_str!("queries_sqlite/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_SQLITE: &str = + include_str!("queries_sqlite/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = + include_str!("queries_sqlite/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_task_by_id.sql"); +const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); pub const DEFAULT_TASK_TYPE: &str = "common"; @@ -159,28 +180,36 @@ pub struct AsyncQueue { max_pool_size: u32, #[builder(default = false, setter(skip))] connected: bool, + #[builder(default = "".to_string() , setter(skip))] + backend: String, } #[cfg(test)] use tokio::sync::Mutex; #[cfg(test)] -static ASYNC_QUEUE_DB_TEST_COUNTER: Mutex = Mutex::const_new(0); +static ASYNC_QUEUE_POSTGRES_TEST_COUNTER: Mutex = Mutex::const_new(0); + +#[cfg(test)] +static ASYNC_QUEUE_SQLITE_TEST_COUNTER: Mutex = Mutex::const_new(0); #[cfg(test)] use sqlx::Executor; +#[cfg(test)] +use std::path::Path; + #[cfg(test)] impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB - pub async fn test() -> Self { + pub async fn test_postgres() -> Self { const BASE_URI: &str = "postgres://postgres:postgres@localhost"; let mut res = Self::builder() .max_pool_size(1_u32) .uri(format!("{}/fang", BASE_URI)) .build(); - let mut new_number = ASYNC_QUEUE_DB_TEST_COUNTER.lock().await; + let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; res.connect().await.unwrap(); let db_name = format!("async_queue_test_{}", *new_number); @@ -214,6 +243,35 @@ impl AsyncQueue { res } + + /// Provides an AsyncQueue connected to its own DB + pub async fn test_sqlite() -> Self { + const BASE_FILE: &str = "../fang.db"; + + let mut new_number = 
ASYNC_QUEUE_SQLITE_TEST_COUNTER.lock().await; + + let db_name = format!("../tests_sqlite/async_queue_test_{}.db", *new_number); + *new_number += 1; + + let path = Path::new(&db_name); + + if path.exists() { + log::info!("Deleting database {db_name} ..."); + std::fs::remove_file(path).unwrap(); + } + + log::info!("Creating database {db_name} ..."); + std::fs::copy(BASE_FILE, &db_name).unwrap(); + log::info!("Database {db_name} created !!"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("sqlite://{}", db_name)) + .build(); + + res.connect().await.expect("fail to connect"); + res + } } impl AsyncQueue { @@ -235,6 +293,12 @@ impl AsyncQueue { .connect(&self.uri) .await?; + let conn = pool.acquire().await?; + + self.backend = conn.backend_name().to_string(); + + drop(conn); + self.pool = Some(pool); self.connected = true; Ok(()) @@ -242,8 +306,19 @@ impl AsyncQueue { async fn remove_all_tasks_query( transaction: &mut Transaction<'_, Any>, + backend: &str, ) -> Result { - Ok(sqlx::query(REMOVE_ALL_TASK_QUERY) + let query = if backend == "PostgreSQL" { + REMOVE_ALL_TASK_QUERY_POSTGRES + } else if backend == "SQLite" { + REMOVE_ALL_TASK_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + Ok(sqlx::query(query) .execute(transaction.acquire().await?) .await? 
.rows_affected()) @@ -251,11 +326,21 @@ impl AsyncQueue { async fn remove_all_scheduled_tasks_query( transaction: &mut Transaction<'_, Any>, + backend: &str, ) -> Result { - let now = Utc::now(); - let now_str = now.to_rfc3339(); + let query = if backend == "PostgreSQL" { + REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES + } else if backend == "SQLite" { + REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); - Ok(sqlx::query(REMOVE_ALL_SCHEDULED_TASK_QUERY) + Ok(sqlx::query(query) .bind(now_str) .execute(transaction.acquire().await?) .await? @@ -264,9 +349,20 @@ impl AsyncQueue { async fn remove_task_query( transaction: &mut Transaction<'_, Any>, + backend: &str, id: &[u8], ) -> Result { - let result = sqlx::query(REMOVE_TASK_QUERY) + let query = if backend == "PostgreSQL" { + REMOVE_TASK_QUERY_POSTGRES + } else if backend == "SQLite" { + REMOVE_TASK_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + let result = sqlx::query(query) .bind(id) .execute(transaction.acquire().await?) .await? @@ -284,13 +380,24 @@ impl AsyncQueue { async fn remove_task_by_metadata_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; let uniq_hash = Self::calculate_hash(metadata.to_string()); - Ok(sqlx::query(REMOVE_TASK_BY_METADATA_QUERY) + let query = if backend == "PostgreSQL" { + REMOVE_TASK_BY_METADATA_QUERY_POSTGRES + } else if backend == "SQLite" { + REMOVE_TASK_BY_METADATA_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + Ok(sqlx::query(query) .bind(uniq_hash) .execute(transaction.acquire().await?) .await? 
@@ -299,9 +406,20 @@ impl AsyncQueue { async fn remove_tasks_type_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task_type: &str, ) -> Result { - Ok(sqlx::query(REMOVE_TASKS_TYPE_QUERY) + let query = if backend == "PostgreSQL" { + REMOVE_TASKS_TYPE_QUERY_POSTGRES + } else if backend == "SQLite" { + REMOVE_TASKS_TYPE_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + Ok(sqlx::query(query) .bind(task_type) .execute(transaction.acquire().await?) .await? @@ -310,9 +428,20 @@ impl AsyncQueue { async fn find_task_by_id_query( transaction: &mut Transaction<'_, Any>, + backend: &str, id: &[u8], ) -> Result { - let task: Task = sqlx::query_as(FIND_TASK_BY_ID_QUERY) + let query = if backend == "PostgreSQL" { + FIND_TASK_BY_ID_QUERY_POSTGRES + } else if backend == "SQLite" { + FIND_TASK_BY_ID_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + let task: Task = sqlx::query_as(query) .bind(id) .fetch_one(transaction.acquire().await?) 
.await?; @@ -322,12 +451,23 @@ impl AsyncQueue { async fn fail_task_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task: &Task, error_message: &str, ) -> Result { - let updated_at = Utc::now().to_rfc3339(); + let query = if backend == "PostgreSQL" { + FAIL_TASK_QUERY_POSTGRES + } else if backend == "SQLite" { + FAIL_TASK_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); - let failed_task: Task = sqlx::query_as(FAIL_TASK_QUERY) + let failed_task: Task = sqlx::query_as(query) .bind(<&str>::from(FangTaskState::Failed)) .bind(error_message) .bind(updated_at) @@ -340,17 +480,29 @@ impl AsyncQueue { async fn schedule_retry_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task: &Task, backoff_seconds: u32, error: &str, ) -> Result { + let query = if backend == "PostgreSQL" { + RETRY_TASK_QUERY_POSTGRES + } else if backend == "SQLite" { + RETRY_TASK_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + let now = Utc::now(); - let now_str = now.to_rfc3339(); + let now_str = format!("{}", now.format("%F %T%.f+00")); + let scheduled_at = now + Duration::seconds(backoff_seconds as i64); - let scheduled_at_str = scheduled_at.to_rfc3339(); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); let retries = task.retries + 1; - let failed_task: Task = sqlx::query_as(RETRY_TASK_QUERY) + let failed_task: Task = sqlx::query_as(query) .bind(error) .bind(retries) .bind(scheduled_at_str) @@ -364,6 +516,7 @@ impl AsyncQueue { async fn fetch_and_touch_task_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task_type: Option, ) -> Result, AsyncQueueError> { let task_type = match task_type { @@ -371,14 +524,19 @@ impl AsyncQueue { None => DEFAULT_TASK_TYPE.to_string(), }; - let task = match Self::get_task_type_query(transaction, &task_type).await { + let task = 
match Self::get_task_type_query(transaction, backend, &task_type).await { Ok(some_task) => Some(some_task), Err(_) => None, }; let result_task = if let Some(some_task) = task { Some( - Self::update_task_state_query(transaction, &some_task, FangTaskState::InProgress) - .await?, + Self::update_task_state_query( + transaction, + backend, + &some_task, + FangTaskState::InProgress, + ) + .await?, ) } else { None @@ -388,12 +546,22 @@ impl AsyncQueue { async fn get_task_type_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task_type: &str, ) -> Result { - let now = Utc::now(); - let now_str = now.to_rfc3339(); + let query = if backend == "PostgreSQL" { + FETCH_TASK_TYPE_QUERY_POSTGRES + } else if backend == "SQLite" { + FETCH_TASK_TYPE_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); - let task: Task = sqlx::query_as(FETCH_TASK_TYPE_QUERY) + let task: Task = sqlx::query_as(query) .bind(task_type) .bind(now_str) .fetch_one(transaction.acquire().await?) 
@@ -404,15 +572,25 @@ impl AsyncQueue { async fn update_task_state_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task: &Task, state: FangTaskState, ) -> Result { - let updated_at = Utc::now(); - let updated_at_str = updated_at.to_rfc3339(); + let query = if backend == "PostgreSQL" { + UPDATE_TASK_STATE_QUERY_POSTGRES + } else if backend == "SQLite" { + UPDATE_TASK_STATE_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + + let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); let state_str: &str = state.into(); - let task: Task = sqlx::query_as(UPDATE_TASK_STATE_QUERY) + let task: Task = sqlx::query_as(query) .bind(state_str) .bind(updated_at_str) .bind(&task.id) @@ -424,17 +602,28 @@ impl AsyncQueue { async fn insert_task_query( transaction: &mut Transaction<'_, Any>, + backend: &str, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { + let query = if backend == "PostgreSQL" { + INSERT_TASK_QUERY_POSTGRES + } else if backend == "SQLite" { + INSERT_TASK_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + let uuid = Uuid::new_v4(); let bytes: &[u8] = &uuid.to_bytes_le(); let metadata_str = metadata.to_string(); - let scheduled_at_str = scheduled_at.to_rfc3339(); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); - let task: Task = sqlx::query_as(INSERT_TASK_QUERY) + let task: Task = sqlx::query_as(query) .bind(bytes) .bind(metadata_str) .bind(task_type) @@ -446,19 +635,30 @@ impl AsyncQueue { async fn insert_task_uniq_query( transaction: &mut Transaction<'_, Any>, + backend: &str, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { + let query = if backend == "PostgreSQL" { + INSERT_TASK_UNIQ_QUERY_POSTGRES + } else if backend == "SQLite" { + INSERT_TASK_UNIQ_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() 
+ }; + let uuid = Uuid::new_v4(); let bytes: &[u8] = &uuid.to_bytes_le(); let uniq_hash = Self::calculate_hash(metadata.to_string()); let metadata_str = metadata.to_string(); - let scheduled_at_str = scheduled_at.to_rfc3339(); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); - let task: Task = sqlx::query_as(INSERT_TASK_UNIQ_QUERY) + let task: Task = sqlx::query_as(query) .bind(bytes) .bind(metadata_str) .bind(task_type) @@ -471,14 +671,22 @@ impl AsyncQueue { async fn insert_task_if_not_exist_query( transaction: &mut Transaction<'_, Any>, + backend: &str, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { - match Self::find_task_by_uniq_hash_query(transaction, &metadata).await { + match Self::find_task_by_uniq_hash_query(transaction, backend, &metadata).await { Some(task) => Ok(task), None => { - Self::insert_task_uniq_query(transaction, metadata, task_type, scheduled_at).await + Self::insert_task_uniq_query( + transaction, + backend, + metadata, + task_type, + scheduled_at, + ) + .await } } } @@ -492,11 +700,22 @@ impl AsyncQueue { async fn find_task_by_uniq_hash_query( transaction: &mut Transaction<'_, Any>, + backend: &str, metadata: &serde_json::Value, ) -> Option { + let query = if backend == "PostgreSQL" { + FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES + } else if backend == "SQLite" { + FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE + } else if backend == "MySQL" { + unimplemented!() + } else { + unreachable!() + }; + let uniq_hash = Self::calculate_hash(metadata.to_string()); - sqlx::query_as(FIND_TASK_BY_UNIQ_HASH_QUERY) + sqlx::query_as(query) .bind(uniq_hash) .fetch_one(transaction.acquire().await.ok()?) 
.await @@ -505,6 +724,7 @@ impl AsyncQueue { async fn schedule_task_query( transaction: &mut Transaction<'_, Any>, + backend: &str, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; @@ -528,10 +748,18 @@ impl AsyncQueue { }; let task: Task = if !task.uniq() { - Self::insert_task_query(transaction, metadata, &task.task_type(), scheduled_at).await? + Self::insert_task_query( + transaction, + backend, + metadata, + &task.task_type(), + scheduled_at, + ) + .await? } else { Self::insert_task_if_not_exist_query( transaction, + backend, metadata, &task.task_type(), scheduled_at, @@ -548,7 +776,7 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = Self::find_task_by_id_query(&mut transaction, id).await?; + let task = Self::find_task_by_id_query(&mut transaction, &self.backend, id).await?; transaction.commit().await?; @@ -562,7 +790,8 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = Self::fetch_and_touch_task_query(&mut transaction, task_type).await?; + let task = + Self::fetch_and_touch_task_query(&mut transaction, &self.backend, task_type).await?; transaction.commit().await?; @@ -575,11 +804,18 @@ impl AsyncQueueable for AsyncQueue { let metadata = serde_json::to_value(task)?; let task = if !task.uniq() { - Self::insert_task_query(&mut transaction, metadata, &task.task_type(), Utc::now()) - .await? + Self::insert_task_query( + &mut transaction, + &self.backend, + metadata, + &task.task_type(), + Utc::now(), + ) + .await? 
} else { Self::insert_task_if_not_exist_query( &mut transaction, + &self.backend, metadata, &task.task_type(), Utc::now(), @@ -596,7 +832,7 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = Self::schedule_task_query(&mut transaction, task).await?; + let task = Self::schedule_task_query(&mut transaction, &self.backend, task).await?; transaction.commit().await?; @@ -607,7 +843,7 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_all_tasks_query(&mut transaction).await?; + let result = Self::remove_all_tasks_query(&mut transaction, &self.backend).await?; transaction.commit().await?; @@ -618,7 +854,8 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_all_scheduled_tasks_query(&mut transaction).await?; + let result = + Self::remove_all_scheduled_tasks_query(&mut transaction, &self.backend).await?; transaction.commit().await?; @@ -629,7 +866,7 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_task_query(&mut transaction, id).await?; + let result = Self::remove_task_query(&mut transaction, &self.backend, id).await?; transaction.commit().await?; @@ -644,7 +881,8 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_task_by_metadata_query(&mut transaction, task).await?; + let result = + Self::remove_task_by_metadata_query(&mut transaction, &self.backend, task).await?; transaction.commit().await?; @@ -658,7 +896,8 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = 
self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_tasks_type_query(&mut transaction, task_type).await?; + let result = + Self::remove_tasks_type_query(&mut transaction, &self.backend, task_type).await?; transaction.commit().await?; @@ -673,7 +912,8 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = Self::update_task_state_query(&mut transaction, task, state).await?; + let task = + Self::update_task_state_query(&mut transaction, &self.backend, task, state).await?; transaction.commit().await?; @@ -688,7 +928,8 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = Self::fail_task_query(&mut transaction, task, error_message).await?; + let task = + Self::fail_task_query(&mut transaction, &self.backend, task, error_message).await?; transaction.commit().await?; @@ -705,8 +946,14 @@ impl AsyncQueueable for AsyncQueue { let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let failed_task = - Self::schedule_retry_query(&mut transaction, task, backoff_seconds, error).await?; + let failed_task = Self::schedule_retry_query( + &mut transaction, + &self.backend, + task, + backoff_seconds, + error, + ) + .await?; transaction.commit().await?; @@ -715,4 +962,6 @@ impl AsyncQueueable for AsyncQueue { } #[cfg(test)] -test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test()} +test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} +#[cfg(test)] +test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index afd14314..26a0f8a3 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -227,7 +227,7 @@ macro_rules! 
test_asynk_queue { assert_eq!(Some(2), number); assert_eq!(Some("AsyncTask"), type_task); - let task = test.fetch_and_touch_task(None).await.unwrap().unwrap(); + let task = test.fetch_and_touch_task(None).await.unwrap().unwrap(); // This fails if this FOR UPDATE SKIP LOCKED is set in query fetch task type let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); @@ -251,9 +251,7 @@ macro_rules! test_asynk_queue { let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); - println!("{:?}", task.metadata); - - let metadata = task.metadata.as_object().expect("metadata F"); + let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); @@ -262,10 +260,7 @@ macro_rules! test_asynk_queue { let task = test.insert_task(&AsyncTask { number: 2 }).await.unwrap(); - let metadata = task - .metadata - .as_object() - .expect("borrado mi amigo salio mal"); + let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); @@ -293,7 +288,7 @@ macro_rules! test_asynk_queue { .await .unwrap(); - let metadata = task.metadata.as_object().unwrap(); + let metadata = task.metadata.as_object().expect("here 1"); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); @@ -305,7 +300,7 @@ macro_rules! 
test_asynk_queue { .await .unwrap(); - let metadata = task.metadata.as_object().unwrap(); + let metadata = task.metadata.as_object().expect("here 2"); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index d74d3f6e..8e30c4b8 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -386,7 +386,7 @@ mod async_worker_tests { #[tokio::test] async fn execute_and_finishes_task() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let actual_task = WorkerAsyncTask { number: 1 }; @@ -406,7 +406,7 @@ mod async_worker_tests { #[tokio::test] async fn schedule_task_test() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let actual_task = WorkerAsyncTaskSchedule { number: 1 }; @@ -437,7 +437,7 @@ mod async_worker_tests { #[tokio::test] async fn retries_task_test() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let actual_task = AsyncRetryTask {}; @@ -478,7 +478,7 @@ mod async_worker_tests { #[tokio::test] async fn saves_error_for_failed_task() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let failed_task = AsyncFailedTask { number: 1 }; let task = insert_task(&mut test, &failed_task).await; @@ -502,7 +502,7 @@ mod async_worker_tests { #[tokio::test] async fn executes_task_only_of_specific_type() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let task1 = insert_task(&mut test, &AsyncTaskType1 {}).await; let task12 = insert_task(&mut test, &AsyncTaskType1 {}).await; @@ -533,7 +533,7 @@ mod async_worker_tests { #[tokio::test] async fn remove_when_finished() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let task1 = insert_task(&mut test, &AsyncTaskType1 
{}).await; let task12 = insert_task(&mut test, &AsyncTaskType1 {}).await; @@ -569,7 +569,7 @@ mod async_worker_tests { #[tokio::test] async fn no_schedule_until_run() { - let mut test = AsyncQueue::test().await; + let mut test = AsyncQueue::test_postgres().await; let _task_1 = test .schedule_task(&WorkerAsyncTaskScheduled {}) diff --git a/fang/src/asynk/queries/insert_task.sql b/fang/src/asynk/queries/insert_task.sql deleted file mode 100644 index 920af791..00000000 --- a/fang/src/asynk/queries/insert_task.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, to_json($2), $3, $4::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/insert_task_uniq.sql b/fang/src/asynk/queries/insert_task_uniq.sql deleted file mode 100644 index 5cccc807..00000000 --- a/fang/src/asynk/queries/insert_task_uniq.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, to_json($2) , $3, $4, $5::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql similarity index 100% rename from fang/src/asynk/queries/fail_task.sql rename to fang/src/asynk/queries_postgres/fail_task.sql diff --git a/fang/src/asynk/queries/fetch_task_type.sql b/fang/src/asynk/queries_postgres/fetch_task_type.sql similarity index 100% rename from fang/src/asynk/queries/fetch_task_type.sql rename to fang/src/asynk/queries_postgres/fetch_task_type.sql diff --git a/fang/src/asynk/queries/find_task_by_id.sql b/fang/src/asynk/queries_postgres/find_task_by_id.sql similarity index 100% rename from 
fang/src/asynk/queries/find_task_by_id.sql rename to fang/src/asynk/queries_postgres/find_task_by_id.sql diff --git a/fang/src/asynk/queries/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql similarity index 100% rename from fang/src/asynk/queries/find_task_by_uniq_hash.sql rename to fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql new file mode 100644 index 00000000..759d29d1 --- /dev/null +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, $4::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql new file mode 100644 index 00000000..19cdc4c6 --- /dev/null +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, $5::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql similarity index 100% rename from fang/src/asynk/queries/remove_all_scheduled_tasks.sql rename to fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql diff --git a/fang/src/asynk/queries/remove_all_tasks.sql b/fang/src/asynk/queries_postgres/remove_all_tasks.sql similarity index 100% rename from fang/src/asynk/queries/remove_all_tasks.sql rename to fang/src/asynk/queries_postgres/remove_all_tasks.sql diff --git 
a/fang/src/asynk/queries/remove_task.sql b/fang/src/asynk/queries_postgres/remove_task.sql similarity index 100% rename from fang/src/asynk/queries/remove_task.sql rename to fang/src/asynk/queries_postgres/remove_task.sql diff --git a/fang/src/asynk/queries/remove_task_by_metadata.sql b/fang/src/asynk/queries_postgres/remove_task_by_metadata.sql similarity index 100% rename from fang/src/asynk/queries/remove_task_by_metadata.sql rename to fang/src/asynk/queries_postgres/remove_task_by_metadata.sql diff --git a/fang/src/asynk/queries/remove_tasks_type.sql b/fang/src/asynk/queries_postgres/remove_tasks_type.sql similarity index 100% rename from fang/src/asynk/queries/remove_tasks_type.sql rename to fang/src/asynk/queries_postgres/remove_tasks_type.sql diff --git a/fang/src/asynk/queries/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql similarity index 100% rename from fang/src/asynk/queries/retry_task.sql rename to fang/src/asynk/queries_postgres/retry_task.sql diff --git a/fang/src/asynk/queries/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql similarity index 100% rename from fang/src/asynk/queries/update_task_state.sql rename to fang/src/asynk/queries_postgres/update_task_state.sql diff --git a/fang/src/asynk/queries_sqlite/fail_task.sql b/fang/src/asynk/queries_sqlite/fail_task.sql new file mode 100644 index 00000000..f8ae5f5b --- /dev/null +++ b/fang/src/asynk/queries_sqlite/fail_task.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = $1 , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_sqlite/fetch_task_type.sql b/fang/src/asynk/queries_sqlite/fetch_task_type.sql new file mode 100644 index 00000000..afe40422 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/fetch_task_type.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state, task_type , 
uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_sqlite/find_task_by_id.sql b/fang/src/asynk/queries_sqlite/find_task_by_id.sql new file mode 100644 index 00000000..60b4cf93 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/find_task_by_id.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1 diff --git a/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql new file mode 100644 index 00000000..d12443ad --- /dev/null +++ b/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_sqlite/insert_task.sql b/fang/src/asynk/queries_sqlite/insert_task.sql new file mode 100644 index 00000000..9cca503e --- /dev/null +++ b/fang/src/asynk/queries_sqlite/insert_task.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3, $4 ) RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_sqlite/insert_task_uniq.sql b/fang/src/asynk/queries_sqlite/insert_task_uniq.sql new file mode 100644 index 00000000..9ffc4499 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/insert_task_uniq.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2 , $3, $4, $5 ) RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , 
created_at , updated_at diff --git a/fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql new file mode 100644 index 00000000..61a5b6b5 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE scheduled_at > $1 diff --git a/fang/src/asynk/queries_sqlite/remove_all_tasks.sql b/fang/src/asynk/queries_sqlite/remove_all_tasks.sql new file mode 100644 index 00000000..eaecbbaf --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_all_tasks.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" diff --git a/fang/src/asynk/queries_sqlite/remove_task.sql b/fang/src/asynk/queries_sqlite/remove_task.sql new file mode 100644 index 00000000..b6da69fd --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_task.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE id = $1 diff --git a/fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql b/fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql new file mode 100644 index 00000000..94324e2a --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE uniq_hash = $1 diff --git a/fang/src/asynk/queries_sqlite/remove_tasks_type.sql b/fang/src/asynk/queries_sqlite/remove_tasks_type.sql new file mode 100644 index 00000000..e4de9c0f --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_tasks_type.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE task_type = $1 diff --git a/fang/src/asynk/queries_sqlite/retry_task.sql b/fang/src/asynk/queries_sqlite/retry_task.sql new file mode 100644 index 00000000..ae0d95b1 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/retry_task.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5 RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at 
diff --git a/fang/src/asynk/queries_sqlite/update_task_state.sql b/fang/src/asynk/queries_sqlite/update_task_state.sql new file mode 100644 index 00000000..a24fddf8 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/update_task_state.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = $1 , "updated_at" = $2 WHERE id = $3 RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/blocking/sqlite_schema.rs b/fang/src/blocking/sqlite_schema.rs index 1062df45..cf39ca9b 100644 --- a/fang/src/blocking/sqlite_schema.rs +++ b/fang/src/blocking/sqlite_schema.rs @@ -2,15 +2,15 @@ diesel::table! { fang_tasks (id) { - id -> Text, + id -> Binary, metadata -> Text, error_message -> Nullable, state -> Text, task_type -> Text, uniq_hash -> Nullable, retries -> Integer, - scheduled_at -> Timestamp, - created_at -> Timestamp, - updated_at -> Timestamp, + scheduled_at -> Text, + created_at -> Text, + updated_at -> Text, } } diff --git a/fang/src/lib.rs b/fang/src/lib.rs index 061db51f..f7e04236 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -2,8 +2,11 @@ #[cfg(feature = "blocking")] use diesel::{Identifiable, Queryable}; +#[cfg(feature = "asynk-sqlx")] use sqlx::any::AnyRow; +#[cfg(feature = "asynk-sqlx")] use sqlx::FromRow; +#[cfg(feature = "asynk-sqlx")] use sqlx::Row; use std::time::Duration; use thiserror::Error; @@ -175,7 +178,7 @@ pub struct Task { pub updated_at: DateTime, } -#[cfg(feature = "asynk")] +#[cfg(feature = "asynk-sqlx")] impl<'a> FromRow<'a, AnyRow> for Task { fn from_row(row: &'a AnyRow) -> Result { let id: Vec = row.get("id"); @@ -183,15 +186,12 @@ impl<'a> FromRow<'a, AnyRow> for Task { let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); - let mut chars = raw.chars(); - chars.next(); - chars.next_back(); - let raw = chars.as_str(); - - let metadata: serde_json::Value = serde_json::from_str(raw).unwrap(); + // 
-- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting + let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); // This should be changed when issue https://github.com/launchbadge/sqlx/issues/2416 is fixed - let error_message: Option = row.try_get("error_message").ok(); + // Fixed in pxp9's fork + let error_message: Option = row.get("error_message"); let state_str: &str = row.get("state"); // will work if database cast json to string @@ -200,12 +200,15 @@ impl<'a> FromRow<'a, AnyRow> for Task { let task_type: String = row.get("task_type"); // This should be changed when issue https://github.com/launchbadge/sqlx/issues/2416 is fixed - let uniq_hash: Option = row.try_get("uniq_hash").ok(); + // Fixed in pxp9's fork + let uniq_hash: Option = row.get("uniq_hash"); let retries: i32 = row.get("retries"); let scheduled_at_str: &str = row.get("scheduled_at"); + println!("{}", scheduled_at_str); + let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") .unwrap() .into(); @@ -274,10 +277,6 @@ pub mod asynk; #[cfg(feature = "asynk")] pub use asynk::*; -#[cfg(feature = "asynk")] -#[doc(hidden)] -pub use bb8_postgres::tokio_postgres::tls::NoTls; - #[cfg(feature = "asynk")] #[doc(hidden)] pub use async_trait::async_trait; @@ -291,14 +290,14 @@ use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; #[cfg(feature = "migrations")] use std::error::Error as SomeError; -#[cfg(feature = "migrations_postgres")] +#[cfg(feature = "migrations-postgres")] use diesel::pg::Pg; -#[cfg(feature = "migrations_postgres")] +#[cfg(feature = "migrations-postgres")] pub const MIGRATIONS_POSTGRES: EmbeddedMigrations = embed_migrations!("postgres_migrations/migrations"); -#[cfg(feature = "migrations_postgres")] +#[cfg(feature = "migrations-postgres")] pub fn run_migrations_postgres( connection: &mut impl MigrationHarness, ) -> Result<(), Box> { @@ -307,13 +306,13 @@ pub fn run_migrations_postgres( Ok(()) } 
-#[cfg(feature = "migrations_mysql")] +#[cfg(feature = "migrations-mysql")] use diesel::mysql::Mysql; -#[cfg(feature = "migrations_mysql")] +#[cfg(feature = "migrations-mysql")] pub const MIGRATIONS_MYSQL: EmbeddedMigrations = embed_migrations!("mysql_migrations/migrations"); -#[cfg(feature = "migrations_mysql")] +#[cfg(feature = "migrations-mysql")] pub fn run_migrations_mysql( connection: &mut impl MigrationHarness, ) -> Result<(), Box> { @@ -322,13 +321,13 @@ pub fn run_migrations_mysql( Ok(()) } -#[cfg(feature = "migrations_sqlite")] +#[cfg(feature = "migrations-sqlite")] use diesel::sqlite::Sqlite; -#[cfg(feature = "migrations_sqlite")] +#[cfg(feature = "migrations-sqlite")] pub const MIGRATIONS_SQLITE: EmbeddedMigrations = embed_migrations!("sqlite_migrations/migrations"); -#[cfg(feature = "migrations_sqlite")] +#[cfg(feature = "migrations-sqlite")] pub fn run_migrations_sqlite( connection: &mut impl MigrationHarness, ) -> Result<(), Box> { From 63b39c380edc4e6991ad0d4fcd867ed8cd249504 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 28 Aug 2023 22:02:10 +0200 Subject: [PATCH 04/90] deleting some expects --- fang/src/asynk/async_queue/async_queue_tests.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index 26a0f8a3..24b8cbff 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -268,10 +268,7 @@ macro_rules! test_asynk_queue { assert_eq!(Some(2), number); assert_eq!(Some("AsyncTask"), type_task); - let result = test - .remove_tasks_type("mytype") - .await - .expect("el numero salio bad"); + let result = test.remove_tasks_type("mytype").await.unwrap(); assert_eq!(0, result); @@ -288,7 +285,7 @@ macro_rules! 
test_asynk_queue { .await .unwrap(); - let metadata = task.metadata.as_object().expect("here 1"); + let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); @@ -300,7 +297,7 @@ macro_rules! test_asynk_queue { .await .unwrap(); - let metadata = task.metadata.as_object().expect("here 2"); + let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); From fb7d4ba2402eaa28b33c814c6cdc1539d1399262 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 01:46:08 +0200 Subject: [PATCH 05/90] sqlite may work in workflow --- .github/workflows/rust.yml | 21 +++++++++++++++++-- fang/src/asynk/async_queue.rs | 10 +++++---- .../asynk/async_queue/async_queue_tests.rs | 2 ++ .../asynk/queries_sqlite/fetch_task_type.sql | 2 +- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6e6d43a1..a0d1ccff 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -49,15 +49,32 @@ jobs: command: clippy args: --verbose --all-targets --all-features -- -D warnings + - name: Install sqlite3 + - uses: awalsh128/cache-apt-pkgs-action@latest + with: + packages: sqlite3 + version: 1.0 + + - name: Create sqlite db + run: sqlite3 fang.db "VACUUM;" + - name: Install diesel-cli uses: actions-rs/cargo@v1 with: command: install - args: diesel_cli --no-default-features --features "postgres" + args: diesel_cli --no-default-features --features "postgres sqlite mysql" + + - name: Setup Sqlite db + working-directory: ./fang/sqlite_migrations + run: diesel setup --database-url "sqlite://../../fang.db" + + - name: Change working dir + working-directory: ./../.. 
+ run: pwd - name: Setup Postgres db working-directory: ./fang/postgres_migrations - run: diesel setup + run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" - name: Change working dir working-directory: ./../.. diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index dba32e92..03139b4a 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -524,10 +524,12 @@ impl AsyncQueue { None => DEFAULT_TASK_TYPE.to_string(), }; - let task = match Self::get_task_type_query(transaction, backend, &task_type).await { - Ok(some_task) => Some(some_task), - Err(_) => None, - }; + let task = Self::get_task_type_query(transaction, backend, &task_type) + .await + .ok(); + + println!("{task:?}"); + let result_task = if let Some(some_task) = task { Some( Self::update_task_state_query( diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index 24b8cbff..814234ef 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -227,6 +227,8 @@ macro_rules! 
test_asynk_queue { assert_eq!(Some(2), number); assert_eq!(Some("AsyncTask"), type_task); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let task = test.fetch_and_touch_task(None).await.unwrap().unwrap(); // This fails if this FOR UPDATE SKIP LOCKED is set in query fetch task type let metadata = task.metadata.as_object().unwrap(); diff --git a/fang/src/asynk/queries_sqlite/fetch_task_type.sql b/fang/src/asynk/queries_sqlite/fetch_task_type.sql index afe40422..02c3f9f4 100644 --- a/fang/src/asynk/queries_sqlite/fetch_task_type.sql +++ b/fang/src/asynk/queries_sqlite/fetch_task_type.sql @@ -1 +1 @@ -SELECT id , metadata , error_message, state, task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id , metadata , error_message, state, task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 From 2ff47e17d2f5a007b3fb4f821ae4c387f93f2e42 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 02:00:40 +0200 Subject: [PATCH 06/90] stupid workflow work --- .github/workflows/rust.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a0d1ccff..11231372 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -54,9 +54,6 @@ jobs: with: packages: sqlite3 version: 1.0 - - - name: Create sqlite db - run: sqlite3 fang.db "VACUUM;" - name: Install diesel-cli uses: actions-rs/cargo@v1 @@ -64,14 +61,6 @@ jobs: command: install args: diesel_cli --no-default-features --features "postgres sqlite mysql" - - name: Setup Sqlite db - working-directory: ./fang/sqlite_migrations - run: diesel 
setup --database-url "sqlite://../../fang.db" - - - name: Change working dir - working-directory: ./../.. - run: pwd - - name: Setup Postgres db working-directory: ./fang/postgres_migrations run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" From fdbe92435f0a4d0fb2db2fbfe644fd0b954c0065 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 02:06:13 +0200 Subject: [PATCH 07/90] plz work --- .github/workflows/rust.yml | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 11231372..de24fc29 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -50,10 +50,9 @@ jobs: args: --verbose --all-targets --all-features -- -D warnings - name: Install sqlite3 - - uses: awalsh128/cache-apt-pkgs-action@latest - with: - packages: sqlite3 - version: 1.0 + - run: | + sudo apt install -y sqlite3 + sqlite3 fang.db "VACUUM;" - name: Install diesel-cli uses: actions-rs/cargo@v1 @@ -61,6 +60,14 @@ jobs: command: install args: diesel_cli --no-default-features --features "postgres sqlite mysql" + - name: Setup Sqlite db + working-directory: ./fang/sqlite_migrations + run: diesel setup --database-url "sqlite3://../../fang.db" + + - name: Change working dir + working-directory: ./../.. 
+ run: pwd + - name: Setup Postgres db working-directory: ./fang/postgres_migrations run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" From 20f376aeeb5b084c967be75b6ea8bdf6ee2b675b Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 02:11:55 +0200 Subject: [PATCH 08/90] plz work , think i have fixed --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index de24fc29..92a61006 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -50,7 +50,7 @@ jobs: args: --verbose --all-targets --all-features -- -D warnings - name: Install sqlite3 - - run: | + run: | sudo apt install -y sqlite3 sqlite3 fang.db "VACUUM;" From e79c0ee6747a52db066d717c4008d4e255de3e15 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 02:22:17 +0200 Subject: [PATCH 09/90] fix workflow , now will work :D --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 92a61006..c554a8a2 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -53,6 +53,7 @@ jobs: run: | sudo apt install -y sqlite3 sqlite3 fang.db "VACUUM;" + mkdir tests_sqlite - name: Install diesel-cli uses: actions-rs/cargo@v1 From 7e1ac885d1c3e68c5a858a331811ca8204bb489c Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Tue, 29 Aug 2023 13:07:14 +0200 Subject: [PATCH 10/90] this should work --- .github/workflows/rust.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index c554a8a2..2986c4ef 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -63,7 +63,7 @@ jobs: - name: Setup Sqlite db working-directory: ./fang/sqlite_migrations - run: diesel setup --database-url "sqlite3://../../fang.db" + 
run: diesel migration run --database-url "sqlite3://../../fang.db" - name: Change working dir working-directory: ./../.. @@ -71,7 +71,7 @@ jobs: - name: Setup Postgres db working-directory: ./fang/postgres_migrations - run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" + run: diesel migration run --database-url "postgres://postgres:postgres@localhost/fang" - name: Change working dir working-directory: ./../.. From 4f308f24744ebe9698d5ef20b4bb71404b587734 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Tue, 29 Aug 2023 13:14:35 +0200 Subject: [PATCH 11/90] this will definitely work --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2986c4ef..87d0e34a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -71,7 +71,7 @@ jobs: - name: Setup Postgres db working-directory: ./fang/postgres_migrations - run: diesel migration run --database-url "postgres://postgres:postgres@localhost/fang" + run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" - name: Change working dir working-directory: ./../.. 
From 204a38a3c963a1c180150d5bfef37e7ad915d3ff Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Tue, 29 Aug 2023 13:27:27 +0200 Subject: [PATCH 12/90] clean up so it works incorrectly faster --- .github/workflows/rust.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 87d0e34a..a6034a1e 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -47,7 +47,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --verbose --all-targets --all-features -- -D warnings + args: --verbose --all-targets --all-features --no-deps -- -D warnings - name: Install sqlite3 run: | @@ -63,7 +63,7 @@ jobs: - name: Setup Sqlite db working-directory: ./fang/sqlite_migrations - run: diesel migration run --database-url "sqlite3://../../fang.db" + run: diesel setup --database-url "sqlite3://../../fang.db" - name: Change working dir working-directory: ./../.. From fdb4f1e00d85be7e681a222038a6323217ffab1c Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Tue, 29 Aug 2023 13:29:59 +0200 Subject: [PATCH 13/90] okay that didn't work so idk what i'm doing here --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a6034a1e..c554a8a2 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -47,7 +47,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --verbose --all-targets --all-features --no-deps -- -D warnings + args: --verbose --all-targets --all-features -- -D warnings - name: Install sqlite3 run: | From 5ba991081a43eeb2be380c2c7fcd69aca599f696 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Tue, 29 Aug 2023 13:33:17 +0200 Subject: [PATCH 14/90] if this works i'm dropping out of university --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 
c554a8a2..ee5a380a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -63,7 +63,7 @@ jobs: - name: Setup Sqlite db working-directory: ./fang/sqlite_migrations - run: diesel setup --database-url "sqlite3://../../fang.db" + run: diesel setup --database-url "sqlite3://../../../fang.db" - name: Change working dir working-directory: ./../.. From 5119dcf870a991ef2fcbbdcb0b471dec92739f89 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 23:14:55 +0200 Subject: [PATCH 15/90] store uuid as text , so this way `PostgreSQL` schema will not change --- .../up.sql | 2 +- .../up.sql | 2 +- fang/src/asynk/async_queue.rs | 47 +++++++++++++------ .../asynk/async_queue/async_queue_tests.rs | 4 +- fang/src/asynk/async_runnable.rs | 2 +- fang/src/asynk/async_worker.rs | 8 ++-- fang/src/asynk/queries_postgres/fail_task.sql | 2 +- .../queries_postgres/fetch_task_type.sql | 2 +- .../queries_postgres/find_task_by_id.sql | 2 +- .../find_task_by_uniq_hash.sql | 2 +- .../asynk/queries_postgres/insert_task.sql | 2 +- .../queries_postgres/insert_task_uniq.sql | 2 +- .../asynk/queries_postgres/remove_task.sql | 2 +- .../src/asynk/queries_postgres/retry_task.sql | 2 +- .../queries_postgres/update_task_state.sql | 2 +- fang/src/blocking/postgres_schema.rs | 2 +- fang/src/blocking/queue.rs | 22 +++------ fang/src/blocking/sqlite_schema.rs | 2 +- fang/src/lib.rs | 7 ++- 19 files changed, 63 insertions(+), 53 deletions(-) diff --git a/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql b/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql index 572b2bc4..eee11c74 100644 --- a/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql +++ b/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql @@ -3,7 +3,7 @@ CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; CREATE TYPE fang_task_state AS ENUM ('new', 'in_progress', 'failed', 'finished', 
'retried'); CREATE TABLE fang_tasks ( - id BYTEA PRIMARY KEY, + id uuid PRIMARY KEY DEFAULT uuid_generate_v4(), metadata jsonb NOT NULL, error_message TEXT, state fang_task_state DEFAULT 'new' NOT NULL, diff --git a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index a3e39394..d289cddf 100644 --- a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -4,7 +4,7 @@ -- docker exec -ti mysql mysql -u root -pfang -P 3360 fang -e "$(catn fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql)" CREATE TABLE fang_tasks ( - id BLOB NOT NULL PRIMARY KEY, -- UUID generated inside the language + id TEXT CHECK (LENGTH(id) = 36) NOT NULL PRIMARY KEY, -- UUID generated inside the language -- why uuid is a blob ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite metadata TEXT NOT NULL, -- why metadata is text ? https://stackoverflow.com/questions/16603621/how-to-store-json-object-in-sqlite-database#16603687 diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 03139b4a..08b5dca1 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -117,7 +117,7 @@ pub trait AsyncQueueable: Send { async fn remove_all_scheduled_tasks(&mut self) -> Result; /// Remove a task by its id. - async fn remove_task(&mut self, id: &[u8]) -> Result; + async fn remove_task(&mut self, id: &Uuid) -> Result; /// Remove a task by its metadata (struct fields values) async fn remove_task_by_metadata( @@ -129,7 +129,7 @@ pub trait AsyncQueueable: Send { async fn remove_tasks_type(&mut self, task_type: &str) -> Result; /// Retrieve a task from storage by its `id`. 
- async fn find_task_by_id(&mut self, id: &[u8]) -> Result; + async fn find_task_by_id(&mut self, id: &Uuid) -> Result; /// Update the state field of the specified task /// See the `FangTaskState` enum for possible states. @@ -350,7 +350,7 @@ impl AsyncQueue { async fn remove_task_query( transaction: &mut Transaction<'_, Any>, backend: &str, - id: &[u8], + id: &Uuid, ) -> Result { let query = if backend == "PostgreSQL" { REMOVE_TASK_QUERY_POSTGRES @@ -362,8 +362,11 @@ impl AsyncQueue { unreachable!() }; + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + let result = sqlx::query(query) - .bind(id) + .bind(&*uuid_as_text) .execute(transaction.acquire().await?) .await? .rows_affected(); @@ -429,7 +432,7 @@ impl AsyncQueue { async fn find_task_by_id_query( transaction: &mut Transaction<'_, Any>, backend: &str, - id: &[u8], + id: &Uuid, ) -> Result { let query = if backend == "PostgreSQL" { FIND_TASK_BY_ID_QUERY_POSTGRES @@ -441,8 +444,11 @@ impl AsyncQueue { unreachable!() }; + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + let task: Task = sqlx::query_as(query) - .bind(id) + .bind(&*uuid_as_text) .fetch_one(transaction.acquire().await?) .await?; @@ -467,11 +473,14 @@ impl AsyncQueue { let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = task.id.as_hyphenated().encode_lower(&mut buffer); + let failed_task: Task = sqlx::query_as(query) .bind(<&str>::from(FangTaskState::Failed)) .bind(error_message) .bind(updated_at) - .bind(&task.id) + .bind(&*uuid_as_text) .fetch_one(transaction.acquire().await?) 
.await?; @@ -502,12 +511,15 @@ impl AsyncQueue { let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); let retries = task.retries + 1; + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = task.id.as_hyphenated().encode_lower(&mut buffer); + let failed_task: Task = sqlx::query_as(query) .bind(error) .bind(retries) .bind(scheduled_at_str) .bind(now_str) - .bind(&task.id) + .bind(&*uuid_as_text) .fetch_one(transaction.acquire().await?) .await?; @@ -592,10 +604,13 @@ impl AsyncQueue { let state_str: &str = state.into(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = task.id.as_hyphenated().encode_lower(&mut buffer); + let task: Task = sqlx::query_as(query) .bind(state_str) .bind(updated_at_str) - .bind(&task.id) + .bind(&*uuid_as_text) .fetch_one(transaction.acquire().await?) .await?; @@ -620,13 +635,14 @@ impl AsyncQueue { }; let uuid = Uuid::new_v4(); - let bytes: &[u8] = &uuid.to_bytes_le(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); let metadata_str = metadata.to_string(); let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); let task: Task = sqlx::query_as(query) - .bind(bytes) + .bind(&*uuid_as_str) .bind(metadata_str) .bind(task_type) .bind(scheduled_at_str) @@ -653,7 +669,8 @@ impl AsyncQueue { }; let uuid = Uuid::new_v4(); - let bytes: &[u8] = &uuid.to_bytes_le(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); let uniq_hash = Self::calculate_hash(metadata.to_string()); @@ -661,7 +678,7 @@ impl AsyncQueue { let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); let task: Task = sqlx::query_as(query) - .bind(bytes) + .bind(&*uuid_as_str) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) @@ -774,7 +791,7 @@ impl AsyncQueue { #[async_trait] impl AsyncQueueable for AsyncQueue { - async fn find_task_by_id(&mut self, id: &[u8]) -> Result { 
+ async fn find_task_by_id(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; @@ -864,7 +881,7 @@ impl AsyncQueueable for AsyncQueue { Ok(result) } - async fn remove_task(&mut self, id: &[u8]) -> Result { + async fn remove_task(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index 814234ef..87df1c1c 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -98,7 +98,7 @@ macro_rules! test_asynk_queue { let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); - let id: &[u8] = &task.id; + let id = task.id; assert_eq!(Some(1), number); assert_eq!(Some("AsyncTask"), type_task); @@ -121,7 +121,7 @@ macro_rules! test_asynk_queue { let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); - let id: &[u8] = &task.id; + let id = task.id; assert_eq!(Some(1), number); assert_eq!(Some("AsyncTask"), type_task); diff --git a/fang/src/asynk/async_runnable.rs b/fang/src/asynk/async_runnable.rs index 380c05ca..3a73148d 100644 --- a/fang/src/asynk/async_runnable.rs +++ b/fang/src/asynk/async_runnable.rs @@ -32,7 +32,7 @@ impl From for FangError { /// Implement this trait to run your custom tasks. #[typetag::serde(tag = "type")] #[async_trait] -pub trait AsyncRunnable: Send + Sync + Send { +pub trait AsyncRunnable: Send + Sync { /// Execute the task. 
This method should define its logic async fn run(&self, client: &mut dyn AsyncQueueable) -> Result<(), FangError>; diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index 8e30c4b8..90422f4e 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -391,7 +391,7 @@ mod async_worker_tests { let actual_task = WorkerAsyncTask { number: 1 }; let task = insert_task(&mut test, &actual_task).await; - let id: &[u8] = &task.id; + let id = task.id; let mut worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) @@ -399,7 +399,7 @@ mod async_worker_tests { .build(); worker.run(&task, &actual_task).await.unwrap(); - let task_finished = test.find_task_by_id(id).await.unwrap(); + let task_finished = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task_finished.id); assert_eq!(FangTaskState::Finished, task_finished.state); } @@ -482,7 +482,7 @@ mod async_worker_tests { let failed_task = AsyncFailedTask { number: 1 }; let task = insert_task(&mut test, &failed_task).await; - let id: &[u8] = &task.id; + let id = task.id; let mut worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) @@ -490,7 +490,7 @@ mod async_worker_tests { .build(); worker.run(&task, &failed_task).await.unwrap(); - let task_finished = test.find_task_by_id(id).await.unwrap(); + let task_finished = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task_finished.id); assert_eq!(FangTaskState::Failed, task_finished.state); diff --git a/fang/src/asynk/queries_postgres/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql index 319449e4..01c24020 100644 --- a/fang/src/asynk/queries_postgres/fail_task.sql +++ b/fang/src/asynk/queries_postgres/fail_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3::timestamptz WHERE id = $4 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , 
scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3::timestamptz WHERE id = $4::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/fetch_task_type.sql b/fang/src/asynk/queries_postgres/fetch_task_type.sql index cafaa556..0bbd61f2 100644 --- a/fang/src/asynk/queries_postgres/fetch_task_type.sql +++ b/fang/src/asynk/queries_postgres/fetch_task_type.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2::timestamptz >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2::timestamptz >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_postgres/find_task_by_id.sql b/fang/src/asynk/queries_postgres/find_task_by_id.sql index 1a26b723..d6e9ee80 100644 --- a/fang/src/asynk/queries_postgres/find_task_by_id.sql +++ b/fang/src/asynk/queries_postgres/find_task_by_id.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE id = $1 +SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE id = $1::uuid diff --git 
a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql index 49f26982..df3d3aa7 100644 --- a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql +++ b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 +SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql index 759d29d1..a9c1fc3d 100644 --- a/fang/src/asynk/queries_postgres/insert_task.sql +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, $4::timestamptz ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1::uuid, $2::jsonb, $3, $4::timestamptz ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql index 19cdc4c6..99674daf 100644 --- a/fang/src/asynk/queries_postgres/insert_task_uniq.sql +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, $5::timestamptz ) 
RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1::uuid, $2::jsonb , $3, $4, $5::timestamptz ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/remove_task.sql b/fang/src/asynk/queries_postgres/remove_task.sql index b6da69fd..e6a2261c 100644 --- a/fang/src/asynk/queries_postgres/remove_task.sql +++ b/fang/src/asynk/queries_postgres/remove_task.sql @@ -1 +1 @@ -DELETE FROM "fang_tasks" WHERE id = $1 +DELETE FROM "fang_tasks" WHERE id = $1::uuid diff --git a/fang/src/asynk/queries_postgres/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql index 24fad7ea..8933dc4e 100644 --- a/fang/src/asynk/queries_postgres/retry_task.sql +++ b/fang/src/asynk/queries_postgres/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3::timestamptz, "updated_at" = $4::timestamptz WHERE id = $5 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3::timestamptz, "updated_at" = $4::timestamptz WHERE id = $5::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql index eef72eb5..a3602bfd 100644 --- a/fang/src/asynk/queries_postgres/update_task_state.sql +++ b/fang/src/asynk/queries_postgres/update_task_state.sql @@ -1 +1 @@ -UPDATE 
"fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2::timestamptz WHERE id = $3 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2::timestamptz WHERE id = $3::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/blocking/postgres_schema.rs b/fang/src/blocking/postgres_schema.rs index b0abc78d..89234568 100644 --- a/fang/src/blocking/postgres_schema.rs +++ b/fang/src/blocking/postgres_schema.rs @@ -11,7 +11,7 @@ diesel::table! { use super::sql_types::FangTaskState; fang_tasks (id) { - id -> Bytea, + id -> Uuid, metadata -> Jsonb, error_message -> Nullable, state -> FangTaskState, diff --git a/fang/src/blocking/queue.rs b/fang/src/blocking/queue.rs index 86bf1f6f..a53542c4 100644 --- a/fang/src/blocking/queue.rs +++ b/fang/src/blocking/queue.rs @@ -35,8 +35,6 @@ pub type PoolConnection = PooledConnection>; #[derive(Insertable, Debug, Eq, PartialEq, Clone, TypedBuilder)] #[diesel(table_name = fang_tasks)] pub struct NewTask { - #[builder(setter(into))] - id: Vec, #[builder(setter(into))] metadata: serde_json::Value, #[builder(setter(into))] @@ -87,14 +85,14 @@ pub trait Queueable { fn remove_tasks_of_type(&self, task_type: &str) -> Result; /// Remove a task by its id. - fn remove_task(&self, id: &[u8]) -> Result; + fn remove_task(&self, id: &Uuid) -> Result; /// To use this function task has to be uniq. uniq() has to return true. /// If task is not uniq this function will not do anything. 
/// Remove a task by its metadata (struct fields values) fn remove_task_by_metadata(&self, task: &dyn Runnable) -> Result; - fn find_task_by_id(&self, id: &[u8]) -> Option; + fn find_task_by_id(&self, id: &Uuid) -> Option; /// Update the state field of the specified task /// See the `FangTaskState` enum for possible states. @@ -177,7 +175,7 @@ impl Queueable for Queue { Self::remove_tasks_of_type_query(&mut connection, task_type) } - fn remove_task(&self, id: &[u8]) -> Result { + fn remove_task(&self, id: &Uuid) -> Result { let mut connection = self.get_connection()?; Self::remove_task_query(&mut connection, id) @@ -207,7 +205,7 @@ impl Queueable for Queue { Self::fail_task_query(&mut connection, task, error) } - fn find_task_by_id(&self, id: &[u8]) -> Option { + fn find_task_by_id(&self, id: &Uuid) -> Option { let mut connection = self.get_connection().unwrap(); Self::find_task_by_id_query(&mut connection, id) @@ -287,11 +285,7 @@ impl Queue { scheduled_at: DateTime, ) -> Result { if !params.uniq() { - let uuid = Uuid::new_v4(); - let id: Vec = uuid.to_bytes_le().to_vec(); - let new_task = NewTask::builder() - .id(id) .scheduled_at(scheduled_at) .uniq_hash(None) .task_type(params.task_type()) @@ -309,11 +303,7 @@ impl Queue { match Self::find_task_by_uniq_hash_query(connection, &uniq_hash) { Some(task) => Ok(task), None => { - let uuid = Uuid::new_v4(); - let id: Vec = uuid.to_bytes_le().to_vec(); - let new_task = NewTask::builder() - .id(id) .scheduled_at(scheduled_at) .uniq_hash(Some(uniq_hash)) .task_type(params.task_type()) @@ -354,7 +344,7 @@ impl Queue { }) } - pub fn find_task_by_id_query(connection: &mut PgConnection, id: &[u8]) -> Option { + pub fn find_task_by_id_query(connection: &mut PgConnection, id: &Uuid) -> Option { fang_tasks::table .filter(fang_tasks::id.eq(id)) .first::(connection) @@ -397,7 +387,7 @@ impl Queue { pub fn remove_task_query( connection: &mut PgConnection, - id: &[u8], + id: &Uuid, ) -> Result { let query = 
fang_tasks::table.filter(fang_tasks::id.eq(id)); diff --git a/fang/src/blocking/sqlite_schema.rs b/fang/src/blocking/sqlite_schema.rs index cf39ca9b..088b9f24 100644 --- a/fang/src/blocking/sqlite_schema.rs +++ b/fang/src/blocking/sqlite_schema.rs @@ -2,7 +2,7 @@ diesel::table! { fang_tasks (id) { - id -> Binary, + id -> Text, metadata -> Text, error_message -> Nullable, state -> Text, diff --git a/fang/src/lib.rs b/fang/src/lib.rs index f7e04236..1db8e4f0 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -11,6 +11,7 @@ use sqlx::Row; use std::time::Duration; use thiserror::Error; use typed_builder::TypedBuilder; +use uuid::Uuid; /// /// Represents a schedule for scheduled tasks. /// It's used in the [`AsyncRunnable::cron`] and [`Runnable::cron`] @@ -157,7 +158,7 @@ impl From for &str { #[diesel(table_name = fang_tasks)] pub struct Task { #[builder(setter(into))] - pub id: Vec, + pub id: Uuid, #[builder(setter(into))] pub metadata: serde_json::Value, #[builder(setter(into))] @@ -181,7 +182,9 @@ pub struct Task { #[cfg(feature = "asynk-sqlx")] impl<'a> FromRow<'a, AnyRow> for Task { fn from_row(row: &'a AnyRow) -> Result { - let id: Vec = row.get("id"); + let uuid_as_text: &str = row.get("id"); + + let id = Uuid::parse_str(uuid_as_text).unwrap(); let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); From 168b47671a5de8aa5d085cf792954a848acfe283 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 23:20:32 +0200 Subject: [PATCH 16/90] fix comment --- .../migrations/2023-08-17-102017_create_fang_tasks/up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index d289cddf..2dc9b9e1 100644 --- a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ 
b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -5,7 +5,7 @@ CREATE TABLE fang_tasks ( id TEXT CHECK (LENGTH(id) = 36) NOT NULL PRIMARY KEY, -- UUID generated inside the language - -- why uuid is a blob ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite + -- why uuid is a text ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite metadata TEXT NOT NULL, -- why metadata is text ? https://stackoverflow.com/questions/16603621/how-to-store-json-object-in-sqlite-database#16603687 error_message TEXT, From f4459d92d5ae196a038f68b42d198e9c4134a486 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 29 Aug 2023 23:24:29 +0200 Subject: [PATCH 17/90] fix clippy --- fang/src/asynk/async_queue.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 08b5dca1..6efc08e0 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -642,7 +642,7 @@ impl AsyncQueue { let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); let task: Task = sqlx::query_as(query) - .bind(&*uuid_as_str) + .bind(uuid_as_str) .bind(metadata_str) .bind(task_type) .bind(scheduled_at_str) @@ -678,7 +678,7 @@ impl AsyncQueue { let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); let task: Task = sqlx::query_as(query) - .bind(&*uuid_as_str) + .bind(uuid_as_str) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) From bf53cfa6af1ba4f70e113e5f20438cb8e41e9ca7 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 30 Aug 2023 01:03:38 +0200 Subject: [PATCH 18/90] implement backend sqlx trait that allows better code --- .../asynk/simple_async_worker/src/main.rs | 4 +- .../simple_cron_async_worker/src/main.rs | 4 +- fang/src/asynk.rs | 2 + fang/src/asynk/async_queue.rs | 243 +++++------------- fang/src/asynk/async_worker.rs | 3 +- 
fang/src/asynk/backend_sqlx.rs | 98 +++++++ 6 files changed, 173 insertions(+), 181 deletions(-) create mode 100644 fang/src/asynk/backend_sqlx.rs diff --git a/fang/fang_examples/asynk/simple_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_async_worker/src/main.rs index fdc7eef1..b4a60526 100644 --- a/fang/fang_examples/asynk/simple_async_worker/src/main.rs +++ b/fang/fang_examples/asynk/simple_async_worker/src/main.rs @@ -7,6 +7,7 @@ use fang::asynk::async_queue::AsyncQueueable; use fang::asynk::async_worker_pool::AsyncWorkerPool; use fang::run_migrations_postgres; use fang::AsyncRunnable; +use fang::BackendSqlXPg; use simple_async_worker::MyFailingTask; use simple_async_worker::MyTask; use std::env; @@ -33,12 +34,13 @@ async fn main() { let mut queue = AsyncQueue::builder() .uri(database_url) .max_pool_size(max_pool_size) + .backend(BackendSqlXPg {}) .build(); queue.connect().await.unwrap(); log::info!("Queue connected..."); - let mut pool: AsyncWorkerPool = AsyncWorkerPool::builder() + let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() .number_of_workers(10_u32) .queue(queue.clone()) .build(); diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs index 34709be3..66a27cc1 100644 --- a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs +++ b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs @@ -3,6 +3,7 @@ use fang::asynk::async_queue::AsyncQueue; use fang::asynk::async_queue::AsyncQueueable; use fang::asynk::async_worker_pool::AsyncWorkerPool; use fang::AsyncRunnable; +use fang::BackendSqlXPg; use simple_cron_async_worker::MyCronTask; use std::env; use std::time::Duration; @@ -18,12 +19,13 @@ async fn main() { let mut queue = AsyncQueue::builder() .uri(database_url) .max_pool_size(max_pool_size) + .backend(BackendSqlXPg {}) .build(); queue.connect().await.unwrap(); log::info!("Queue connected..."); - let mut pool: AsyncWorkerPool = 
AsyncWorkerPool::builder() + let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() .number_of_workers(10_u32) .queue(queue.clone()) .build(); diff --git a/fang/src/asynk.rs b/fang/src/asynk.rs index a75dd036..ab11ea3a 100644 --- a/fang/src/asynk.rs +++ b/fang/src/asynk.rs @@ -2,8 +2,10 @@ pub mod async_queue; pub mod async_runnable; pub mod async_worker; pub mod async_worker_pool; +pub mod backend_sqlx; pub use async_queue::*; pub use async_runnable::AsyncRunnable; pub use async_worker::*; pub use async_worker_pool::*; +pub use backend_sqlx::*; diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 6efc08e0..42f24d12 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -27,42 +27,6 @@ use uuid::Uuid; #[cfg(test)] use self::async_queue_tests::test_asynk_queue; -const INSERT_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY_POSTGRES: &str = - include_str!("queries_postgres/update_task_state.sql"); -const FAIL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES: &str = - include_str!("queries_postgres/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_task.sql"); -const REMOVE_TASK_BY_METADATA_QUERY_POSTGRES: &str = - include_str!("queries_postgres/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY_POSTGRES: &str = - include_str!("queries_postgres/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY_POSTGRES: &str = include_str!("queries_postgres/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES: &str = - include_str!("queries_postgres/find_task_by_uniq_hash.sql"); -const 
FIND_TASK_BY_ID_QUERY_POSTGRES: &str = include_str!("queries_postgres/find_task_by_id.sql"); -const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/retry_task.sql"); - -const INSERT_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY_SQLITE: &str = include_str!("queries_sqlite/update_task_state.sql"); -const FAIL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE: &str = - include_str!("queries_sqlite/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_task.sql"); -const REMOVE_TASK_BY_METADATA_QUERY_SQLITE: &str = - include_str!("queries_sqlite/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = - include_str!("queries_sqlite/find_task_by_uniq_hash.sql"); -const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_task_by_id.sql"); -const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); - pub const DEFAULT_TASK_TYPE: &str = "common"; #[derive(Debug, Error)] @@ -79,6 +43,8 @@ pub enum AsyncQueueError { "AsyncQueue is not connected :( , call connect() method first and then perform operations" )] NotConnectedError, + #[error("AsyncQueue generic does not correspond to uri Backend")] + ConnectionError, #[error("Can not convert `std::time::Duration` to `chrono::Duration`")] TimeError, #[error("Can not perform this operation if task is not uniq, please check its definition in impl AsyncRunnable")] @@ -171,7 +137,10 @@ 
pub trait AsyncQueueable: Send { /// #[derive(TypedBuilder, Debug, Clone)] -pub struct AsyncQueue { +pub struct AsyncQueue +where + Backend: BackendSqlX, +{ #[builder(default=None, setter(skip))] pool: Option, #[builder(setter(into))] @@ -180,8 +149,7 @@ pub struct AsyncQueue { max_pool_size: u32, #[builder(default = false, setter(skip))] connected: bool, - #[builder(default = "".to_string() , setter(skip))] - backend: String, + backend: Backend, } #[cfg(test)] @@ -199,14 +167,23 @@ use sqlx::Executor; #[cfg(test)] use std::path::Path; +use super::backend_sqlx::BackendSqlX; + #[cfg(test)] -impl AsyncQueue { +use super::backend_sqlx::BackendSqlXSQLite; + +#[cfg(test)] +use super::backend_sqlx::BackendSqlXPg; + +#[cfg(test)] +impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_postgres() -> Self { const BASE_URI: &str = "postgres://postgres:postgres@localhost"; let mut res = Self::builder() .max_pool_size(1_u32) .uri(format!("{}/fang", BASE_URI)) + .backend(BackendSqlXPg {}) .build(); let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; @@ -243,7 +220,10 @@ impl AsyncQueue { res } +} +#[cfg(test)] +impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_sqlite() -> Self { const BASE_FILE: &str = "../fang.db"; @@ -267,6 +247,7 @@ impl AsyncQueue { let mut res = Self::builder() .max_pool_size(1_u32) .uri(format!("sqlite://{}", db_name)) + .backend(BackendSqlXSQLite {}) .build(); res.connect().await.expect("fail to connect"); @@ -274,7 +255,10 @@ impl AsyncQueue { } } -impl AsyncQueue { +impl AsyncQueue +where + Backend: BackendSqlX, +{ /// Check if the connection with db is established pub fn check_if_connection(&self) -> Result<(), AsyncQueueError> { if self.connected { @@ -295,10 +279,14 @@ impl AsyncQueue { let conn = pool.acquire().await?; - self.backend = conn.backend_name().to_string(); + let backend = conn.backend_name().to_string(); drop(conn); + if self.backend.name() != backend 
{ + return Err(AsyncQueueError::ConnectionError); + } + self.pool = Some(pool); self.connected = true; Ok(()) @@ -306,17 +294,9 @@ impl AsyncQueue { async fn remove_all_tasks_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, ) -> Result { - let query = if backend == "PostgreSQL" { - REMOVE_ALL_TASK_QUERY_POSTGRES - } else if backend == "SQLite" { - REMOVE_ALL_TASK_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("REMOVE_ALL_TASK_QUERY"); Ok(sqlx::query(query) .execute(transaction.acquire().await?) @@ -326,17 +306,9 @@ impl AsyncQueue { async fn remove_all_scheduled_tasks_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, ) -> Result { - let query = if backend == "PostgreSQL" { - REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES - } else if backend == "SQLite" { - REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("REMOVE_ALL_SCHEDULED_TASK_QUERY"); let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -349,18 +321,10 @@ impl AsyncQueue { async fn remove_task_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, id: &Uuid, ) -> Result { - let query = if backend == "PostgreSQL" { - REMOVE_TASK_QUERY_POSTGRES - } else if backend == "SQLite" { - REMOVE_TASK_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("REMOVE_TASK_QUERY"); let mut buffer = Uuid::encode_buffer(); let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); @@ -383,22 +347,14 @@ impl AsyncQueue { async fn remove_task_by_metadata_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; let uniq_hash = 
Self::calculate_hash(metadata.to_string()); - let query = if backend == "PostgreSQL" { - REMOVE_TASK_BY_METADATA_QUERY_POSTGRES - } else if backend == "SQLite" { - REMOVE_TASK_BY_METADATA_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("REMOVE_TASK_BY_METADATA_QUERY"); Ok(sqlx::query(query) .bind(uniq_hash) @@ -409,18 +365,10 @@ impl AsyncQueue { async fn remove_tasks_type_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task_type: &str, ) -> Result { - let query = if backend == "PostgreSQL" { - REMOVE_TASKS_TYPE_QUERY_POSTGRES - } else if backend == "SQLite" { - REMOVE_TASKS_TYPE_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("REMOVE_TASKS_TYPE_QUERY"); Ok(sqlx::query(query) .bind(task_type) @@ -431,18 +379,10 @@ impl AsyncQueue { async fn find_task_by_id_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, id: &Uuid, ) -> Result { - let query = if backend == "PostgreSQL" { - FIND_TASK_BY_ID_QUERY_POSTGRES - } else if backend == "SQLite" { - FIND_TASK_BY_ID_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("FIND_TASK_BY_ID_QUERY"); let mut buffer = Uuid::encode_buffer(); let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); @@ -457,19 +397,11 @@ impl AsyncQueue { async fn fail_task_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task: &Task, error_message: &str, ) -> Result { - let query = if backend == "PostgreSQL" { - FAIL_TASK_QUERY_POSTGRES - } else if backend == "SQLite" { - FAIL_TASK_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("FAIL_TASK_QUERY"); let updated_at = format!("{}", Utc::now().format("%F 
%T%.f+00")); @@ -489,20 +421,12 @@ impl AsyncQueue { async fn schedule_retry_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task: &Task, backoff_seconds: u32, error: &str, ) -> Result { - let query = if backend == "PostgreSQL" { - RETRY_TASK_QUERY_POSTGRES - } else if backend == "SQLite" { - RETRY_TASK_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("RETRY_TASK_QUERY"); let now = Utc::now(); let now_str = format!("{}", now.format("%F %T%.f+00")); @@ -528,7 +452,7 @@ impl AsyncQueue { async fn fetch_and_touch_task_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task_type: Option, ) -> Result, AsyncQueueError> { let task_type = match task_type { @@ -560,18 +484,10 @@ impl AsyncQueue { async fn get_task_type_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task_type: &str, ) -> Result { - let query = if backend == "PostgreSQL" { - FETCH_TASK_TYPE_QUERY_POSTGRES - } else if backend == "SQLite" { - FETCH_TASK_TYPE_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("FETCH_TASK_TYPE_QUERY"); let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -586,19 +502,11 @@ impl AsyncQueue { async fn update_task_state_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, task: &Task, state: FangTaskState, ) -> Result { - let query = if backend == "PostgreSQL" { - UPDATE_TASK_STATE_QUERY_POSTGRES - } else if backend == "SQLite" { - UPDATE_TASK_STATE_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("UPDATE_TASK_STATE_QUERY"); let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -619,20 +527,12 @@ impl AsyncQueue { async fn insert_task_query( transaction: &mut 
Transaction<'_, Any>, - backend: &str, + backend: &Backend, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { - let query = if backend == "PostgreSQL" { - INSERT_TASK_QUERY_POSTGRES - } else if backend == "SQLite" { - INSERT_TASK_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("INSERT_TASK_QUERY"); let uuid = Uuid::new_v4(); let mut buffer = Uuid::encode_buffer(); @@ -653,20 +553,12 @@ impl AsyncQueue { async fn insert_task_uniq_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, ) -> Result { - let query = if backend == "PostgreSQL" { - INSERT_TASK_UNIQ_QUERY_POSTGRES - } else if backend == "SQLite" { - INSERT_TASK_UNIQ_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("INSERT_TASK_UNIQ_QUERY"); let uuid = Uuid::new_v4(); let mut buffer = Uuid::encode_buffer(); @@ -690,7 +582,7 @@ impl AsyncQueue { async fn insert_task_if_not_exist_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, @@ -719,18 +611,10 @@ impl AsyncQueue { async fn find_task_by_uniq_hash_query( transaction: &mut Transaction<'_, Any>, - backend: &str, + backend: &Backend, metadata: &serde_json::Value, ) -> Option { - let query = if backend == "PostgreSQL" { - FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES - } else if backend == "SQLite" { - FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE - } else if backend == "MySQL" { - unimplemented!() - } else { - unreachable!() - }; + let query = backend.select_query("FIND_TASK_BY_UNIQ_HASH_QUERY"); let uniq_hash = Self::calculate_hash(metadata.to_string()); @@ -743,7 +627,7 @@ impl AsyncQueue { async fn schedule_task_query( transaction: &mut Transaction<'_, Any>, - backend: &str, 
+ backend: &Backend, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; @@ -790,7 +674,10 @@ impl AsyncQueue { } #[async_trait] -impl AsyncQueueable for AsyncQueue { +impl AsyncQueueable for AsyncQueue +where + Backend: BackendSqlX, +{ async fn find_task_by_id(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; @@ -981,6 +868,6 @@ impl AsyncQueueable for AsyncQueue { } #[cfg(test)] -test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} +test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} #[cfg(test)] -test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} +test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index 90422f4e..a45a8f54 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -254,6 +254,7 @@ mod async_worker_tests { use crate::asynk::async_queue::AsyncQueue; use crate::asynk::async_queue::AsyncQueueable; use crate::asynk::async_worker::Task; + use crate::asynk::backend_sqlx::BackendSqlXPg; use crate::asynk::AsyncRunnable; use crate::FangError; use crate::FangTaskState; @@ -563,7 +564,7 @@ mod async_worker_tests { assert_eq!(id2, task2.id); } - async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { + async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { test.insert_task(task).await.unwrap() } diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs new file mode 100644 index 00000000..e762c335 --- /dev/null +++ b/fang/src/asynk/backend_sqlx.rs @@ -0,0 +1,98 @@ +use std::fmt::Debug; + +const INSERT_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = 
include_str!("queries_postgres/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_POSTGRES: &str = + include_str!("queries_postgres/update_task_state.sql"); +const FAIL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES: &str = + include_str!("queries_postgres/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_POSTGRES: &str = + include_str!("queries_postgres/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_POSTGRES: &str = + include_str!("queries_postgres/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_POSTGRES: &str = include_str!("queries_postgres/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES: &str = + include_str!("queries_postgres/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_POSTGRES: &str = include_str!("queries_postgres/find_task_by_id.sql"); +const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/retry_task.sql"); + +const INSERT_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_SQLITE: &str = include_str!("queries_sqlite/update_task_state.sql"); +const FAIL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE: &str = + include_str!("queries_sqlite/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_SQLITE: &str = + include_str!("queries_sqlite/remove_task_by_metadata.sql"); +const 
REMOVE_TASKS_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = + include_str!("queries_sqlite/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_task_by_id.sql"); +const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); + +pub trait BackendSqlX: Send + Debug + Clone + Sync { + fn select_query(&self, query: &str) -> &str; + fn name(&self) -> &str; +} + +#[derive(Debug, Clone)] +pub struct BackendSqlXPg {} + +impl BackendSqlX for BackendSqlXPg { + fn select_query(&self, query: &str) -> &str { + match query { + "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_POSTGRES, + "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_POSTGRES, + "UPDATE_TASK_STATE_QUERY" => UPDATE_TASK_STATE_QUERY_POSTGRES, + "FAIL_TASK_QUERY" => FAIL_TASK_QUERY_POSTGRES, + "REMOVE_ALL_TASK_QUERY" => REMOVE_ALL_TASK_QUERY_POSTGRES, + "REMOVE_ALL_SCHEDULED_TASK_QUERY" => REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + "REMOVE_TASK_QUERY" => REMOVE_TASK_QUERY_POSTGRES, + "REMOVE_TASK_BY_METADATA_QUERY" => REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + "REMOVE_TASKS_TYPE_QUERY" => REMOVE_TASKS_TYPE_QUERY_POSTGRES, + "FETCH_TASK_TYPE_QUERY" => FETCH_TASK_TYPE_QUERY_POSTGRES, + "FIND_TASK_BY_UNIQ_HASH_QUERY" => FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + "FIND_TASK_BY_ID_QUERY" => FIND_TASK_BY_ID_QUERY_POSTGRES, + "RETRY_TASK_QUERY" => RETRY_TASK_QUERY_POSTGRES, + _ => unreachable!(), + } + } + + fn name(&self) -> &str { + "PostgreSQL" + } +} + +#[derive(Debug, Clone)] +pub struct BackendSqlXSQLite {} + +impl BackendSqlX for BackendSqlXSQLite { + fn select_query(&self, query: &str) -> &str { + match query { + "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_SQLITE, + "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_SQLITE, + "UPDATE_TASK_STATE_QUERY" => 
UPDATE_TASK_STATE_QUERY_SQLITE, + "FAIL_TASK_QUERY" => FAIL_TASK_QUERY_SQLITE, + "REMOVE_ALL_TASK_QUERY" => REMOVE_ALL_TASK_QUERY_SQLITE, + "REMOVE_ALL_SCHEDULED_TASK_QUERY" => REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + "REMOVE_TASK_QUERY" => REMOVE_TASK_QUERY_SQLITE, + "REMOVE_TASK_BY_METADATA_QUERY" => REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + "REMOVE_TASKS_TYPE_QUERY" => REMOVE_TASKS_TYPE_QUERY_SQLITE, + "FETCH_TASK_TYPE_QUERY" => FETCH_TASK_TYPE_QUERY_SQLITE, + "FIND_TASK_BY_UNIQ_HASH_QUERY" => FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + "FIND_TASK_BY_ID_QUERY" => FIND_TASK_BY_ID_QUERY_SQLITE, + "RETRY_TASK_QUERY" => RETRY_TASK_QUERY_SQLITE, + _ => unreachable!(), + } + } + + fn name(&self) -> &str { + "SQLite" + } +} From 004082026c5fd9cc5e539cf05c903e357374e87f Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 30 Aug 2023 02:26:38 +0200 Subject: [PATCH 19/90] better implementation with enum variants --- .../asynk/simple_async_worker/src/main.rs | 4 +- .../simple_cron_async_worker/src/main.rs | 4 +- fang/src/asynk/async_queue.rs | 77 +++++++---------- fang/src/asynk/async_worker.rs | 3 +- fang/src/asynk/backend_sqlx.rs | 84 ++++++++++++++++--- 5 files changed, 104 insertions(+), 68 deletions(-) diff --git a/fang/fang_examples/asynk/simple_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_async_worker/src/main.rs index b4a60526..fdc7eef1 100644 --- a/fang/fang_examples/asynk/simple_async_worker/src/main.rs +++ b/fang/fang_examples/asynk/simple_async_worker/src/main.rs @@ -7,7 +7,6 @@ use fang::asynk::async_queue::AsyncQueueable; use fang::asynk::async_worker_pool::AsyncWorkerPool; use fang::run_migrations_postgres; use fang::AsyncRunnable; -use fang::BackendSqlXPg; use simple_async_worker::MyFailingTask; use simple_async_worker::MyTask; use std::env; @@ -34,13 +33,12 @@ async fn main() { let mut queue = AsyncQueue::builder() .uri(database_url) .max_pool_size(max_pool_size) - .backend(BackendSqlXPg {}) .build(); 
queue.connect().await.unwrap(); log::info!("Queue connected..."); - let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() + let mut pool: AsyncWorkerPool = AsyncWorkerPool::builder() .number_of_workers(10_u32) .queue(queue.clone()) .build(); diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs index 66a27cc1..34709be3 100644 --- a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs +++ b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs @@ -3,7 +3,6 @@ use fang::asynk::async_queue::AsyncQueue; use fang::asynk::async_queue::AsyncQueueable; use fang::asynk::async_worker_pool::AsyncWorkerPool; use fang::AsyncRunnable; -use fang::BackendSqlXPg; use simple_cron_async_worker::MyCronTask; use std::env; use std::time::Duration; @@ -19,13 +18,12 @@ async fn main() { let mut queue = AsyncQueue::builder() .uri(database_url) .max_pool_size(max_pool_size) - .backend(BackendSqlXPg {}) .build(); queue.connect().await.unwrap(); log::info!("Queue connected..."); - let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() + let mut pool: AsyncWorkerPool = AsyncWorkerPool::builder() .number_of_workers(10_u32) .queue(queue.clone()) .build(); diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 42f24d12..f253c5b5 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -43,7 +43,7 @@ pub enum AsyncQueueError { "AsyncQueue is not connected :( , call connect() method first and then perform operations" )] NotConnectedError, - #[error("AsyncQueue generic does not correspond to uri Backend")] + #[error("AsyncQueue generic does not correspond to uri BackendSqlX")] ConnectionError, #[error("Can not convert `std::time::Duration` to `chrono::Duration`")] TimeError, @@ -58,8 +58,8 @@ impl From for AsyncQueueError { } /// This trait defines operations for an asynchronous queue. 
-/// The trait can be implemented for different storage backends. -/// For now, the trait is only implemented for PostgreSQL. More backends are planned to be implemented in the future. +/// The trait can be implemented for different storage BackendSqlXs. +/// For now, the trait is only implemented for PostgreSQL. More BackendSqlXs are planned to be implemented in the future. #[async_trait] pub trait AsyncQueueable: Send { @@ -137,10 +137,7 @@ pub trait AsyncQueueable: Send { /// #[derive(TypedBuilder, Debug, Clone)] -pub struct AsyncQueue -where - Backend: BackendSqlX, -{ +pub struct AsyncQueue { #[builder(default=None, setter(skip))] pool: Option, #[builder(setter(into))] @@ -149,7 +146,8 @@ where max_pool_size: u32, #[builder(default = false, setter(skip))] connected: bool, - backend: Backend, + #[builder(default = BackendSqlX::NoBackend, setter(skip))] + backend: BackendSqlX, } #[cfg(test)] @@ -170,20 +168,13 @@ use std::path::Path; use super::backend_sqlx::BackendSqlX; #[cfg(test)] -use super::backend_sqlx::BackendSqlXSQLite; - -#[cfg(test)] -use super::backend_sqlx::BackendSqlXPg; - -#[cfg(test)] -impl AsyncQueue { +impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_postgres() -> Self { const BASE_URI: &str = "postgres://postgres:postgres@localhost"; let mut res = Self::builder() .max_pool_size(1_u32) .uri(format!("{}/fang", BASE_URI)) - .backend(BackendSqlXPg {}) .build(); let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; @@ -220,10 +211,7 @@ impl AsyncQueue { res } -} -#[cfg(test)] -impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_sqlite() -> Self { const BASE_FILE: &str = "../fang.db"; @@ -247,7 +235,6 @@ impl AsyncQueue { let mut res = Self::builder() .max_pool_size(1_u32) .uri(format!("sqlite://{}", db_name)) - .backend(BackendSqlXSQLite {}) .build(); res.connect().await.expect("fail to connect"); @@ -255,10 +242,7 @@ impl AsyncQueue { } } -impl AsyncQueue -where 
- Backend: BackendSqlX, -{ +impl AsyncQueue { /// Check if the connection with db is established pub fn check_if_connection(&self) -> Result<(), AsyncQueueError> { if self.connected { @@ -283,9 +267,7 @@ where drop(conn); - if self.backend.name() != backend { - return Err(AsyncQueueError::ConnectionError); - } + self.backend = BackendSqlX::new_with_name(&backend); self.pool = Some(pool); self.connected = true; @@ -294,7 +276,7 @@ where async fn remove_all_tasks_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, ) -> Result { let query = backend.select_query("REMOVE_ALL_TASK_QUERY"); @@ -306,7 +288,7 @@ where async fn remove_all_scheduled_tasks_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, ) -> Result { let query = backend.select_query("REMOVE_ALL_SCHEDULED_TASK_QUERY"); @@ -321,7 +303,7 @@ where async fn remove_task_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, id: &Uuid, ) -> Result { let query = backend.select_query("REMOVE_TASK_QUERY"); @@ -347,7 +329,7 @@ where async fn remove_task_by_metadata_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; @@ -365,7 +347,7 @@ where async fn remove_tasks_type_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task_type: &str, ) -> Result { let query = backend.select_query("REMOVE_TASKS_TYPE_QUERY"); @@ -379,7 +361,7 @@ where async fn find_task_by_id_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, id: &Uuid, ) -> Result { let query = backend.select_query("FIND_TASK_BY_ID_QUERY"); @@ -397,7 +379,7 @@ where async fn fail_task_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task: &Task, error_message: &str, ) -> Result { @@ -421,7 +403,7 @@ 
where async fn schedule_retry_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task: &Task, backoff_seconds: u32, error: &str, @@ -452,7 +434,7 @@ where async fn fetch_and_touch_task_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task_type: Option, ) -> Result, AsyncQueueError> { let task_type = match task_type { @@ -484,7 +466,7 @@ where async fn get_task_type_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task_type: &str, ) -> Result { let query = backend.select_query("FETCH_TASK_TYPE_QUERY"); @@ -502,7 +484,7 @@ where async fn update_task_state_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task: &Task, state: FangTaskState, ) -> Result { @@ -527,7 +509,7 @@ where async fn insert_task_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, @@ -553,7 +535,7 @@ where async fn insert_task_uniq_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, @@ -582,7 +564,7 @@ where async fn insert_task_if_not_exist_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, metadata: serde_json::Value, task_type: &str, scheduled_at: DateTime, @@ -611,7 +593,7 @@ where async fn find_task_by_uniq_hash_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, metadata: &serde_json::Value, ) -> Option { let query = backend.select_query("FIND_TASK_BY_UNIQ_HASH_QUERY"); @@ -627,7 +609,7 @@ where async fn schedule_task_query( transaction: &mut Transaction<'_, Any>, - backend: &Backend, + backend: &BackendSqlX, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; @@ -674,10 +656,7 @@ where } 
#[async_trait] -impl AsyncQueueable for AsyncQueue -where - Backend: BackendSqlX, -{ +impl AsyncQueueable for AsyncQueue { async fn find_task_by_id(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; @@ -868,6 +847,6 @@ where } #[cfg(test)] -test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} +test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} #[cfg(test)] -test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} +test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index a45a8f54..90422f4e 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -254,7 +254,6 @@ mod async_worker_tests { use crate::asynk::async_queue::AsyncQueue; use crate::asynk::async_queue::AsyncQueueable; use crate::asynk::async_worker::Task; - use crate::asynk::backend_sqlx::BackendSqlXPg; use crate::asynk::AsyncRunnable; use crate::FangError; use crate::FangTaskState; @@ -564,7 +563,7 @@ mod async_worker_tests { assert_eq!(id2, task2.id); } - async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { + async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { test.insert_task(task).await.unwrap() } diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index e762c335..43c65d92 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -36,16 +36,47 @@ const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_task_by_id.sql"); const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); -pub trait BackendSqlX: Send + Debug + Clone + Sync { - fn select_query(&self, query: &str) -> &str; - fn name(&self) -> &str; 
+#[derive(Debug, Clone)] +pub enum BackendSqlX { + Pg, + Sqlite, + Mysql, + NoBackend, +} + +impl BackendSqlX { + pub fn new_with_name(name: &str) -> BackendSqlX { + match name { + "PostgreSQL" => BackendSqlX::Pg, + "SQLite" => BackendSqlX::Sqlite, + "MySQL" => BackendSqlX::Mysql, + _ => unreachable!(), + } + } + + pub fn select_query<'a>(&self, query: &'a str) -> &'a str { + match self { + BackendSqlX::Pg => BackendSqlXPg::select_query(query), + BackendSqlX::Sqlite => BackendSqlXSQLite::select_query(query), + BackendSqlX::Mysql => BackendSqlXMySQL::select_query(query), + _ => unreachable!(), + } + } + pub fn name(&self) -> &str { + match self { + BackendSqlX::Pg => BackendSqlXPg::name(), + BackendSqlX::Sqlite => BackendSqlXSQLite::name(), + BackendSqlX::Mysql => BackendSqlXMySQL::name(), + _ => unreachable!(), + } + } } #[derive(Debug, Clone)] -pub struct BackendSqlXPg {} +struct BackendSqlXPg {} -impl BackendSqlX for BackendSqlXPg { - fn select_query(&self, query: &str) -> &str { +impl BackendSqlXPg { + fn select_query(query: &str) -> &str { match query { "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_POSTGRES, "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_POSTGRES, @@ -64,16 +95,16 @@ impl BackendSqlX for BackendSqlXPg { } } - fn name(&self) -> &str { + fn name() -> &'static str { "PostgreSQL" } } #[derive(Debug, Clone)] -pub struct BackendSqlXSQLite {} +struct BackendSqlXSQLite {} -impl BackendSqlX for BackendSqlXSQLite { - fn select_query(&self, query: &str) -> &str { +impl BackendSqlXSQLite { + fn select_query(query: &str) -> &str { match query { "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_SQLITE, "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_SQLITE, @@ -92,7 +123,38 @@ impl BackendSqlX for BackendSqlXSQLite { } } - fn name(&self) -> &str { + fn name() -> &'static str { "SQLite" } } + +#[derive(Debug, Clone)] +struct BackendSqlXMySQL {} + +impl BackendSqlXMySQL { + fn select_query(query: &str) -> &str { + // TODO: MySQL queries + let _query = match query { + 
"INSERT_TASK_QUERY" => INSERT_TASK_QUERY_POSTGRES, + "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_POSTGRES, + "UPDATE_TASK_STATE_QUERY" => UPDATE_TASK_STATE_QUERY_POSTGRES, + "FAIL_TASK_QUERY" => FAIL_TASK_QUERY_POSTGRES, + "REMOVE_ALL_TASK_QUERY" => REMOVE_ALL_TASK_QUERY_POSTGRES, + "REMOVE_ALL_SCHEDULED_TASK_QUERY" => REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + "REMOVE_TASK_QUERY" => REMOVE_TASK_QUERY_POSTGRES, + "REMOVE_TASK_BY_METADATA_QUERY" => REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + "REMOVE_TASKS_TYPE_QUERY" => REMOVE_TASKS_TYPE_QUERY_POSTGRES, + "FETCH_TASK_TYPE_QUERY" => FETCH_TASK_TYPE_QUERY_POSTGRES, + "FIND_TASK_BY_UNIQ_HASH_QUERY" => FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + "FIND_TASK_BY_ID_QUERY" => FIND_TASK_BY_ID_QUERY_POSTGRES, + "RETRY_TASK_QUERY" => RETRY_TASK_QUERY_POSTGRES, + _ => unreachable!(), + }; + + todo!() + } + + fn name() -> &'static str { + "MySQL" + } +} From 08238bc5163120e3a894ac39ca2d9d3e8123eaca Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Thu, 31 Aug 2023 11:53:38 +0200 Subject: [PATCH 20/90] `Makefile` now creates `tests_sqlite` directory --- .env | 1 + Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/.env b/.env index 43ea1787..1f7e4b35 100644 --- a/.env +++ b/.env @@ -20,6 +20,7 @@ SQLITE_FILE=fang.db SQLITE_DIESEL_DIR=fang/sqlite_migrations SQLITE_MIGRATIONS=${SQLITE_DIESEL_DIR}/migrations SQLITE_CONFIG=${SQLITE_DIESEL_DIR}/diesel.toml +SQLITE_TESTS_DIR=tests_sqlite HOST=127.0.0.1 POSTGRES_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST}/${POSTGRES_DB} diff --git a/Makefile b/Makefile index fc340091..e1848509 100644 --- a/Makefile +++ b/Makefile @@ -47,6 +47,7 @@ db_mysql: db_sqlite: @echo -e $(BOLD)Setting up SQLite database...$(END_BOLD) sqlite3 "$(SQLITE_FILE)" "VACUUM;" + mkdir -p "$(SQLITE_TESTS_DIR)" $(MAKE) diesel_sqlite diesel: $(DIESEL_TARGETS) From 169d9557b185dce9eb8cb689dab2751b57f6e5e9 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Thu, 31 Aug 2023 12:18:25 +0200 Subject: 
[PATCH 21/90] make `asynk` tests use `.env` variables --- .env | 3 ++- fang/src/asynk/async_queue.rs | 29 ++++++++++++++++++++--------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.env b/.env index 1f7e4b35..df954bbb 100644 --- a/.env +++ b/.env @@ -23,6 +23,7 @@ SQLITE_CONFIG=${SQLITE_DIESEL_DIR}/diesel.toml SQLITE_TESTS_DIR=tests_sqlite HOST=127.0.0.1 -POSTGRES_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST}/${POSTGRES_DB} +POSTGRES_BASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST} +POSTGRES_URL=${POSTGRES_BASE_URL}/${POSTGRES_DB} MYSQL_URL=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@${HOST}/${MYSQL_DB} DATABASE_URL=${POSTGRES_URL} diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index f253c5b5..83f5eb9f 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -165,16 +165,22 @@ use sqlx::Executor; #[cfg(test)] use std::path::Path; +#[cfg(test)] +use std::env; + use super::backend_sqlx::BackendSqlX; #[cfg(test)] impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_postgres() -> Self { - const BASE_URI: &str = "postgres://postgres:postgres@localhost"; + dotenvy::dotenv().expect(".env file not found"); + let base_url = env::var("POSTGRES_BASE_URL").expect("Base URL for Postgres not found"); + let base_db = env::var("POSTGRES_DB").expect("Name for base Postgres DB not found"); + let mut res = Self::builder() .max_pool_size(1_u32) - .uri(format!("{}/fang", BASE_URI)) + .uri(format!("{}/{}", base_url, base_db)) .build(); let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; @@ -192,10 +198,12 @@ impl AsyncQueue { conn.execute(delete_query).await.unwrap(); log::info!("Creating database {db_name} ..."); + let expected_error: &str = &format!( + "source database \"{}\" is being accessed by other users", + base_db + ); while let Err(e) = conn.execute(create_query).await { - if e.as_database_error().unwrap().message() - != "source 
database \"fang\" is being accessed by other users" - { + if e.as_database_error().unwrap().message() != expected_error { panic!("{:?}", e); } } @@ -206,7 +214,7 @@ impl AsyncQueue { res.connected = false; res.pool = None; - res.uri = format!("{}/{}", BASE_URI, db_name); + res.uri = format!("{}/{}", base_url, db_name); res.connect().await.unwrap(); res @@ -214,11 +222,14 @@ impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_sqlite() -> Self { - const BASE_FILE: &str = "../fang.db"; + dotenvy::dotenv().expect(".env file not found"); + let tests_dir = env::var("SQLITE_TESTS_DIR").expect("Name for tests directory not found"); + let base_file = env::var("SQLITE_FILE").expect("Name for SQLite DB file not found"); + let sqlite_file = format!("../{}", base_file); let mut new_number = ASYNC_QUEUE_SQLITE_TEST_COUNTER.lock().await; - let db_name = format!("../tests_sqlite/async_queue_test_{}.db", *new_number); + let db_name = format!("../{}/async_queue_test_{}.db", tests_dir, *new_number); *new_number += 1; let path = Path::new(&db_name); @@ -229,7 +240,7 @@ impl AsyncQueue { } log::info!("Creating database {db_name} ..."); - std::fs::copy(BASE_FILE, &db_name).unwrap(); + std::fs::copy(sqlite_file, &db_name).unwrap(); log::info!("Database {db_name} created !!"); let mut res = Self::builder() From 458967343c1cdb1e91065823c5fc7a2c28557fbe Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Thu, 31 Aug 2023 12:25:03 +0200 Subject: [PATCH 22/90] drop unnecessary `drop` --- fang/src/asynk/async_queue.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 83f5eb9f..7055f618 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -210,8 +210,6 @@ impl AsyncQueue { log::info!("Database {db_name} created !!"); - drop(conn); - res.connected = false; res.pool = None; res.uri = format!("{}/{}", base_url, db_name); From a11ef5570d2fb7f01ec951093f89eb943d50d4ca Mon 
Sep 17 00:00:00 2001 From: pxp9 Date: Fri, 1 Sep 2023 14:05:42 +0200 Subject: [PATCH 23/90] query types as enum --- fang/src/asynk/async_queue.rs | 27 +++---- fang/src/asynk/backend_sqlx.rs | 125 +++++++++++++++++++-------------- 2 files changed, 85 insertions(+), 67 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 7055f618..6bc64448 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -1,6 +1,7 @@ #[cfg(test)] mod async_queue_tests; +use crate::SqlXQuery; use crate::asynk::async_runnable::AsyncRunnable; use crate::CronError; use crate::FangTaskState; @@ -287,7 +288,7 @@ impl AsyncQueue { transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, ) -> Result { - let query = backend.select_query("REMOVE_ALL_TASK_QUERY"); + let query = backend.select_query(SqlXQuery::RemoveAllTask); Ok(sqlx::query(query) .execute(transaction.acquire().await?) @@ -299,7 +300,7 @@ impl AsyncQueue { transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, ) -> Result { - let query = backend.select_query("REMOVE_ALL_SCHEDULED_TASK_QUERY"); + let query = backend.select_query(SqlXQuery::RemoveAllScheduledTask); let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -315,7 +316,7 @@ impl AsyncQueue { backend: &BackendSqlX, id: &Uuid, ) -> Result { - let query = backend.select_query("REMOVE_TASK_QUERY"); + let query = backend.select_query(SqlXQuery::RemoveTask); let mut buffer = Uuid::encode_buffer(); let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); @@ -345,7 +346,7 @@ impl AsyncQueue { let uniq_hash = Self::calculate_hash(metadata.to_string()); - let query = backend.select_query("REMOVE_TASK_BY_METADATA_QUERY"); + let query = backend.select_query(SqlXQuery::RemoveTaskByMetadata); Ok(sqlx::query(query) .bind(uniq_hash) @@ -359,7 +360,7 @@ impl AsyncQueue { backend: &BackendSqlX, task_type: &str, ) -> Result { - let query = backend.select_query("REMOVE_TASKS_TYPE_QUERY"); + let query 
= backend.select_query(SqlXQuery::RemoveTaskType); Ok(sqlx::query(query) .bind(task_type) @@ -373,7 +374,7 @@ impl AsyncQueue { backend: &BackendSqlX, id: &Uuid, ) -> Result { - let query = backend.select_query("FIND_TASK_BY_ID_QUERY"); + let query = backend.select_query(SqlXQuery::FindTaskById); let mut buffer = Uuid::encode_buffer(); let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); @@ -392,7 +393,7 @@ impl AsyncQueue { task: &Task, error_message: &str, ) -> Result { - let query = backend.select_query("FAIL_TASK_QUERY"); + let query = backend.select_query(SqlXQuery::FailTask); let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -417,7 +418,7 @@ impl AsyncQueue { backoff_seconds: u32, error: &str, ) -> Result { - let query = backend.select_query("RETRY_TASK_QUERY"); + let query = backend.select_query(SqlXQuery::RetryTask); let now = Utc::now(); let now_str = format!("{}", now.format("%F %T%.f+00")); @@ -478,7 +479,7 @@ impl AsyncQueue { backend: &BackendSqlX, task_type: &str, ) -> Result { - let query = backend.select_query("FETCH_TASK_TYPE_QUERY"); + let query = backend.select_query(SqlXQuery::FetchTaskType); let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -497,7 +498,7 @@ impl AsyncQueue { task: &Task, state: FangTaskState, ) -> Result { - let query = backend.select_query("UPDATE_TASK_STATE_QUERY"); + let query = backend.select_query(SqlXQuery::UpdateTaskState); let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -523,7 +524,7 @@ impl AsyncQueue { task_type: &str, scheduled_at: DateTime, ) -> Result { - let query = backend.select_query("INSERT_TASK_QUERY"); + let query = backend.select_query(SqlXQuery::InsertTask); let uuid = Uuid::new_v4(); let mut buffer = Uuid::encode_buffer(); @@ -549,7 +550,7 @@ impl AsyncQueue { task_type: &str, scheduled_at: DateTime, ) -> Result { - let query = backend.select_query("INSERT_TASK_UNIQ_QUERY"); + let query = 
backend.select_query(SqlXQuery::InsertTaskUniq); let uuid = Uuid::new_v4(); let mut buffer = Uuid::encode_buffer(); @@ -605,7 +606,7 @@ impl AsyncQueue { backend: &BackendSqlX, metadata: &serde_json::Value, ) -> Option { - let query = backend.select_query("FIND_TASK_BY_UNIQ_HASH_QUERY"); + let query = backend.select_query(SqlXQuery::FindTaskByUniqHash); let uniq_hash = Self::calculate_hash(metadata.to_string()); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 43c65d92..7539fd77 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -37,7 +37,7 @@ const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_tas const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); #[derive(Debug, Clone)] -pub enum BackendSqlX { +pub(crate) enum BackendSqlX { Pg, Sqlite, Mysql, @@ -54,7 +54,7 @@ impl BackendSqlX { } } - pub fn select_query<'a>(&self, query: &'a str) -> &'a str { + pub (crate) fn select_query<'a>(&self, query: SqlXQuery) -> &'a str { match self { BackendSqlX::Pg => BackendSqlXPg::select_query(query), BackendSqlX::Sqlite => BackendSqlXSQLite::select_query(query), @@ -62,40 +62,59 @@ impl BackendSqlX { _ => unreachable!(), } } - pub fn name(&self) -> &str { + + // I think it is useful to have this method, although it is not used + pub (crate) fn _name(&self) -> &str { match self { - BackendSqlX::Pg => BackendSqlXPg::name(), - BackendSqlX::Sqlite => BackendSqlXSQLite::name(), - BackendSqlX::Mysql => BackendSqlXMySQL::name(), + BackendSqlX::Pg => BackendSqlXPg::_name(), + BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), + BackendSqlX::Mysql => BackendSqlXMySQL::_name(), _ => unreachable!(), } } } +#[derive(Debug, Clone)] +pub(crate) enum SqlXQuery { + InsertTask, + InsertTaskUniq, + UpdateTaskState, + FailTask, + RemoveAllTask, + RemoveAllScheduledTask, + RemoveTask, + RemoveTaskByMetadata, + RemoveTaskType, + FetchTaskType, + FindTaskByUniqHash, + 
FindTaskById, + RetryTask +} + #[derive(Debug, Clone)] struct BackendSqlXPg {} +use SqlXQuery as Q ; impl BackendSqlXPg { - fn select_query(query: &str) -> &str { + fn select_query(query: SqlXQuery) -> &'static str { match query { - "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_POSTGRES, - "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_POSTGRES, - "UPDATE_TASK_STATE_QUERY" => UPDATE_TASK_STATE_QUERY_POSTGRES, - "FAIL_TASK_QUERY" => FAIL_TASK_QUERY_POSTGRES, - "REMOVE_ALL_TASK_QUERY" => REMOVE_ALL_TASK_QUERY_POSTGRES, - "REMOVE_ALL_SCHEDULED_TASK_QUERY" => REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, - "REMOVE_TASK_QUERY" => REMOVE_TASK_QUERY_POSTGRES, - "REMOVE_TASK_BY_METADATA_QUERY" => REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, - "REMOVE_TASKS_TYPE_QUERY" => REMOVE_TASKS_TYPE_QUERY_POSTGRES, - "FETCH_TASK_TYPE_QUERY" => FETCH_TASK_TYPE_QUERY_POSTGRES, - "FIND_TASK_BY_UNIQ_HASH_QUERY" => FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, - "FIND_TASK_BY_ID_QUERY" => FIND_TASK_BY_ID_QUERY_POSTGRES, - "RETRY_TASK_QUERY" => RETRY_TASK_QUERY_POSTGRES, - _ => unreachable!(), + Q::InsertTask => INSERT_TASK_QUERY_POSTGRES, + Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_POSTGRES, + Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_POSTGRES, + Q::FailTask => FAIL_TASK_QUERY_POSTGRES, + Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_POSTGRES, + Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + Q::RemoveTask => REMOVE_TASK_QUERY_POSTGRES, + Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_POSTGRES, + Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_POSTGRES, + Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + Q::FindTaskById => FIND_TASK_BY_ID_QUERY_POSTGRES, + Q::RetryTask => RETRY_TASK_QUERY_POSTGRES, } } - fn name() -> &'static str { + fn _name() -> &'static str { "PostgreSQL" } } @@ -104,26 +123,25 @@ impl BackendSqlXPg { struct BackendSqlXSQLite {} impl BackendSqlXSQLite { - fn select_query(query: &str) 
-> &str { + fn select_query(query: SqlXQuery) -> &'static str { match query { - "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_SQLITE, - "INSERT_TASK_UNIQ_QUERY" => INSERT_TASK_UNIQ_QUERY_SQLITE, - "UPDATE_TASK_STATE_QUERY" => UPDATE_TASK_STATE_QUERY_SQLITE, - "FAIL_TASK_QUERY" => FAIL_TASK_QUERY_SQLITE, - "REMOVE_ALL_TASK_QUERY" => REMOVE_ALL_TASK_QUERY_SQLITE, - "REMOVE_ALL_SCHEDULED_TASK_QUERY" => REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, - "REMOVE_TASK_QUERY" => REMOVE_TASK_QUERY_SQLITE, - "REMOVE_TASK_BY_METADATA_QUERY" => REMOVE_TASK_BY_METADATA_QUERY_SQLITE, - "REMOVE_TASKS_TYPE_QUERY" => REMOVE_TASKS_TYPE_QUERY_SQLITE, - "FETCH_TASK_TYPE_QUERY" => FETCH_TASK_TYPE_QUERY_SQLITE, - "FIND_TASK_BY_UNIQ_HASH_QUERY" => FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, - "FIND_TASK_BY_ID_QUERY" => FIND_TASK_BY_ID_QUERY_SQLITE, - "RETRY_TASK_QUERY" => RETRY_TASK_QUERY_SQLITE, - _ => unreachable!(), + Q::InsertTask => INSERT_TASK_QUERY_SQLITE, + Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_SQLITE, + Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_SQLITE, + Q::FailTask => FAIL_TASK_QUERY_SQLITE, + Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_SQLITE, + Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + Q::RemoveTask => REMOVE_TASK_QUERY_SQLITE, + Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_SQLITE, + Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_SQLITE, + Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + Q::FindTaskById => FIND_TASK_BY_ID_QUERY_SQLITE, + Q::RetryTask => RETRY_TASK_QUERY_SQLITE, } } - fn name() -> &'static str { + fn _name() -> &'static str { "SQLite" } } @@ -132,29 +150,28 @@ impl BackendSqlXSQLite { struct BackendSqlXMySQL {} impl BackendSqlXMySQL { - fn select_query(query: &str) -> &str { + fn select_query(query: SqlXQuery) -> &'static str { // TODO: MySQL queries let _query = match query { - "INSERT_TASK_QUERY" => INSERT_TASK_QUERY_POSTGRES, - "INSERT_TASK_UNIQ_QUERY" => 
INSERT_TASK_UNIQ_QUERY_POSTGRES, - "UPDATE_TASK_STATE_QUERY" => UPDATE_TASK_STATE_QUERY_POSTGRES, - "FAIL_TASK_QUERY" => FAIL_TASK_QUERY_POSTGRES, - "REMOVE_ALL_TASK_QUERY" => REMOVE_ALL_TASK_QUERY_POSTGRES, - "REMOVE_ALL_SCHEDULED_TASK_QUERY" => REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, - "REMOVE_TASK_QUERY" => REMOVE_TASK_QUERY_POSTGRES, - "REMOVE_TASK_BY_METADATA_QUERY" => REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, - "REMOVE_TASKS_TYPE_QUERY" => REMOVE_TASKS_TYPE_QUERY_POSTGRES, - "FETCH_TASK_TYPE_QUERY" => FETCH_TASK_TYPE_QUERY_POSTGRES, - "FIND_TASK_BY_UNIQ_HASH_QUERY" => FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, - "FIND_TASK_BY_ID_QUERY" => FIND_TASK_BY_ID_QUERY_POSTGRES, - "RETRY_TASK_QUERY" => RETRY_TASK_QUERY_POSTGRES, - _ => unreachable!(), + Q::InsertTask => INSERT_TASK_QUERY_SQLITE, + Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_SQLITE, + Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_SQLITE, + Q::FailTask => FAIL_TASK_QUERY_SQLITE, + Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_SQLITE, + Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + Q::RemoveTask => REMOVE_TASK_QUERY_SQLITE, + Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_SQLITE, + Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_SQLITE, + Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + Q::FindTaskById => FIND_TASK_BY_ID_QUERY_SQLITE, + Q::RetryTask => RETRY_TASK_QUERY_SQLITE, }; todo!() } - fn name() -> &'static str { + fn _name() -> &'static str { "MySQL" } } From 3332c83c41d43044b7cd47ef6b27ae7686aac98e Mon Sep 17 00:00:00 2001 From: pxp9 Date: Fri, 1 Sep 2023 14:06:55 +0200 Subject: [PATCH 24/90] fix clippy --- fang/src/asynk.rs | 1 - fang/src/asynk/async_queue.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/fang/src/asynk.rs b/fang/src/asynk.rs index ab11ea3a..2a8ab87d 100644 --- a/fang/src/asynk.rs +++ b/fang/src/asynk.rs @@ -8,4 +8,3 @@ pub use async_queue::*; pub use 
async_runnable::AsyncRunnable; pub use async_worker::*; pub use async_worker_pool::*; -pub use backend_sqlx::*; diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 6bc64448..c07ec8ba 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -1,7 +1,7 @@ #[cfg(test)] mod async_queue_tests; -use crate::SqlXQuery; +use crate::backend_sqlx::SqlXQuery; use crate::asynk::async_runnable::AsyncRunnable; use crate::CronError; use crate::FangTaskState; From 5f1c601566f0942eca7eb8dff8990b23c42fbf7d Mon Sep 17 00:00:00 2001 From: pxp9 Date: Fri, 1 Sep 2023 14:11:44 +0200 Subject: [PATCH 25/90] fix fmt, my bad i had a missconf in my home computer xd --- fang/src/asynk/async_queue.rs | 2 +- fang/src/asynk/backend_sqlx.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index c07ec8ba..e39a25a0 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -1,8 +1,8 @@ #[cfg(test)] mod async_queue_tests; -use crate::backend_sqlx::SqlXQuery; use crate::asynk::async_runnable::AsyncRunnable; +use crate::backend_sqlx::SqlXQuery; use crate::CronError; use crate::FangTaskState; use crate::Scheduled::*; diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 7539fd77..8734eb0f 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -54,7 +54,7 @@ impl BackendSqlX { } } - pub (crate) fn select_query<'a>(&self, query: SqlXQuery) -> &'a str { + pub(crate) fn select_query<'a>(&self, query: SqlXQuery) -> &'a str { match self { BackendSqlX::Pg => BackendSqlXPg::select_query(query), BackendSqlX::Sqlite => BackendSqlXSQLite::select_query(query), @@ -64,7 +64,7 @@ impl BackendSqlX { } // I think it is useful to have this method, although it is not used - pub (crate) fn _name(&self) -> &str { + pub(crate) fn _name(&self) -> &str { match self { BackendSqlX::Pg => BackendSqlXPg::_name(), 
BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), @@ -88,13 +88,13 @@ pub(crate) enum SqlXQuery { FetchTaskType, FindTaskByUniqHash, FindTaskById, - RetryTask + RetryTask, } #[derive(Debug, Clone)] struct BackendSqlXPg {} -use SqlXQuery as Q ; +use SqlXQuery as Q; impl BackendSqlXPg { fn select_query(query: SqlXQuery) -> &'static str { match query { From 67bf9d01fb1ab0bbf734815ac44c59f6771a0707 Mon Sep 17 00:00:00 2001 From: pxp9 Date: Fri, 1 Sep 2023 17:09:23 +0200 Subject: [PATCH 26/90] stupid mysql does not work anything , i hate it so much :/ --- .env | 3 +- fang/src/asynk/async_queue.rs | 53 +++++++++++++++++++ fang/src/asynk/backend_sqlx.rs | 50 ++++++++++------- fang/src/asynk/queries_mysql/fail_task.sql | 3 ++ .../asynk/queries_mysql/fetch_task_type.sql | 1 + .../asynk/queries_mysql/find_task_by_id.sql | 1 + .../queries_mysql/find_task_by_uniq_hash.sql | 1 + fang/src/asynk/queries_mysql/insert_task.sql | 7 +++ .../asynk/queries_mysql/insert_task_uniq.sql | 5 ++ .../remove_all_scheduled_tasks.sql | 1 + .../asynk/queries_mysql/remove_all_tasks.sql | 1 + fang/src/asynk/queries_mysql/remove_task.sql | 1 + .../queries_mysql/remove_task_by_metadata.sql | 1 + .../asynk/queries_mysql/remove_tasks_type.sql | 1 + fang/src/asynk/queries_mysql/retry_task.sql | 3 ++ .../asynk/queries_mysql/update_task_state.sql | 5 ++ 16 files changed, 118 insertions(+), 19 deletions(-) create mode 100644 fang/src/asynk/queries_mysql/fail_task.sql create mode 100644 fang/src/asynk/queries_mysql/fetch_task_type.sql create mode 100644 fang/src/asynk/queries_mysql/find_task_by_id.sql create mode 100644 fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql create mode 100644 fang/src/asynk/queries_mysql/insert_task.sql create mode 100644 fang/src/asynk/queries_mysql/insert_task_uniq.sql create mode 100644 fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql create mode 100644 fang/src/asynk/queries_mysql/remove_all_tasks.sql create mode 100644 
fang/src/asynk/queries_mysql/remove_task.sql create mode 100644 fang/src/asynk/queries_mysql/remove_task_by_metadata.sql create mode 100644 fang/src/asynk/queries_mysql/remove_tasks_type.sql create mode 100644 fang/src/asynk/queries_mysql/retry_task.sql create mode 100644 fang/src/asynk/queries_mysql/update_task_state.sql diff --git a/.env b/.env index df954bbb..91c5e820 100644 --- a/.env +++ b/.env @@ -25,5 +25,6 @@ SQLITE_TESTS_DIR=tests_sqlite HOST=127.0.0.1 POSTGRES_BASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST} POSTGRES_URL=${POSTGRES_BASE_URL}/${POSTGRES_DB} -MYSQL_URL=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@${HOST}/${MYSQL_DB} +MYSQL_BASE_URL=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@${HOST} +MYSQL_URL=${MYSQL_BASE_URL}/${MYSQL_DB} DATABASE_URL=${POSTGRES_URL} diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index e39a25a0..aeede499 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -250,6 +250,56 @@ impl AsyncQueue { res.connect().await.expect("fail to connect"); res } + + /// Provides an AsyncQueue connected to its own DB + pub async fn test_mysql() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let base_url = env::var("MYSQL_BASE_URL").expect("Base URL for MySQL not found"); + let base_db = env::var("MYSQL_DB").expect("Name for base MySQL DB not found"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("{}/{}", base_url, base_db)) + .build(); + + let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; + res.connect().await.unwrap(); + + let db_name = format!("async_queue_test_{}", *new_number); + *new_number += 1; + + let create_query: &str = &format!( + "CREATE DATABASE {}; CREATE TABLE {}.fang_tasks LIKE fang.fang_tasks;", + db_name, db_name + ); + + let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); + + let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); + + log::info!("Deleting database 
{db_name} ..."); + conn.execute(delete_query).await.unwrap(); + + log::info!("Creating database {db_name} ..."); + let expected_error: &str = &format!( + "source database \"{}\" is being accessed by other users", + base_db + ); + while let Err(e) = conn.execute(create_query).await { + if e.as_database_error().unwrap().message() != expected_error { + panic!("{:?}", e); + } + } + + log::info!("Database {db_name} created !!"); + + res.connected = false; + res.pool = None; + res.uri = format!("{}/{}", base_url, db_name); + res.connect().await.unwrap(); + + res + } } impl AsyncQueue { @@ -860,3 +910,6 @@ impl AsyncQueueable for AsyncQueue { test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} #[cfg(test)] test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} + +#[cfg(test)] +test_asynk_queue! {mysql, crate::AsyncQueue, crate::AsyncQueue::test_mysql()} diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 8734eb0f..c0abecb7 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -36,6 +36,23 @@ const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_task_by_id.sql"); const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); +const INSERT_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_MYSQL: &str = include_str!("queries_mysql/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_MYSQL: &str = include_str!("queries_mysql/update_task_state.sql"); +const FAIL_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL: &str = + include_str!("queries_mysql/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_MYSQL: &str = 
include_str!("queries_mysql/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_MYSQL: &str = + include_str!("queries_mysql/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_MYSQL: &str = include_str!("queries_mysql/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_MYSQL: &str = include_str!("queries_mysql/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL: &str = + include_str!("queries_mysql/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_MYSQL: &str = include_str!("queries_mysql/find_task_by_id.sql"); +const RETRY_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/retry_task.sql"); + #[derive(Debug, Clone)] pub(crate) enum BackendSqlX { Pg, @@ -151,24 +168,21 @@ struct BackendSqlXMySQL {} impl BackendSqlXMySQL { fn select_query(query: SqlXQuery) -> &'static str { - // TODO: MySQL queries - let _query = match query { - Q::InsertTask => INSERT_TASK_QUERY_SQLITE, - Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_SQLITE, - Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_SQLITE, - Q::FailTask => FAIL_TASK_QUERY_SQLITE, - Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_SQLITE, - Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, - Q::RemoveTask => REMOVE_TASK_QUERY_SQLITE, - Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_SQLITE, - Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_SQLITE, - Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_SQLITE, - Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, - Q::FindTaskById => FIND_TASK_BY_ID_QUERY_SQLITE, - Q::RetryTask => RETRY_TASK_QUERY_SQLITE, - }; - - todo!() + match query { + Q::InsertTask => INSERT_TASK_QUERY_MYSQL, + Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_MYSQL, + Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_MYSQL, + Q::FailTask => FAIL_TASK_QUERY_MYSQL, + Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_MYSQL, + Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, + Q::RemoveTask => REMOVE_TASK_QUERY_MYSQL, + Q::RemoveTaskByMetadata => 
REMOVE_TASK_BY_METADATA_QUERY_MYSQL, + Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_MYSQL, + Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_MYSQL, + Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, + Q::FindTaskById => FIND_TASK_BY_ID_QUERY_MYSQL, + Q::RetryTask => RETRY_TASK_QUERY_MYSQL, + } } fn _name() -> &'static str { diff --git a/fang/src/asynk/queries_mysql/fail_task.sql b/fang/src/asynk/queries_mysql/fail_task.sql new file mode 100644 index 00000000..b89d13d1 --- /dev/null +++ b/fang/src/asynk/queries_mysql/fail_task.sql @@ -0,0 +1,3 @@ +UPDATE fang_tasks SET state = $1 , error_message = $2 , updated_at = $3 WHERE id = $4 ; + +SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $4 ; diff --git a/fang/src/asynk/queries_mysql/fetch_task_type.sql b/fang/src/asynk/queries_mysql/fetch_task_type.sql new file mode 100644 index 00000000..02c3f9f4 --- /dev/null +++ b/fang/src/asynk/queries_mysql/fetch_task_type.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state, task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 diff --git a/fang/src/asynk/queries_mysql/find_task_by_id.sql b/fang/src/asynk/queries_mysql/find_task_by_id.sql new file mode 100644 index 00000000..60b4cf93 --- /dev/null +++ b/fang/src/asynk/queries_mysql/find_task_by_id.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1 diff --git a/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql new file mode 100644 index 00000000..d12443ad --- /dev/null +++ b/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, 
state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_mysql/insert_task.sql b/fang/src/asynk/queries_mysql/insert_task.sql new file mode 100644 index 00000000..6f0c2bba --- /dev/null +++ b/fang/src/asynk/queries_mysql/insert_task.sql @@ -0,0 +1,7 @@ +BEGIN + +INSERT INTO fang_tasks (id, metadata, task_type, scheduled_at) VALUES (?, ?, ?, ?); + +SELECT * FROM fang_tasks WHERE id = 'uuid'; + +END \ No newline at end of file diff --git a/fang/src/asynk/queries_mysql/insert_task_uniq.sql b/fang/src/asynk/queries_mysql/insert_task_uniq.sql new file mode 100644 index 00000000..dbbe6d73 --- /dev/null +++ b/fang/src/asynk/queries_mysql/insert_task_uniq.sql @@ -0,0 +1,5 @@ +INSERT INTO fang_tasks ( id , metadata, task_type , uniq_hash, scheduled_at) +VALUES ($1, $2 , $3, $4, $5 ) ; + + +SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1 ; diff --git a/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql new file mode 100644 index 00000000..db72fce8 --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE scheduled_at > $1 diff --git a/fang/src/asynk/queries_mysql/remove_all_tasks.sql b/fang/src/asynk/queries_mysql/remove_all_tasks.sql new file mode 100644 index 00000000..4da949ca --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_all_tasks.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks diff --git a/fang/src/asynk/queries_mysql/remove_task.sql b/fang/src/asynk/queries_mysql/remove_task.sql new file mode 100644 index 00000000..4a384bd7 --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_task.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE id = $1 diff --git 
a/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql new file mode 100644 index 00000000..85cb4eea --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE uniq_hash = $1 diff --git a/fang/src/asynk/queries_mysql/remove_tasks_type.sql b/fang/src/asynk/queries_mysql/remove_tasks_type.sql new file mode 100644 index 00000000..a12477fc --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_tasks_type.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE task_type = $1 diff --git a/fang/src/asynk/queries_mysql/retry_task.sql b/fang/src/asynk/queries_mysql/retry_task.sql new file mode 100644 index 00000000..4be5ea8f --- /dev/null +++ b/fang/src/asynk/queries_mysql/retry_task.sql @@ -0,0 +1,3 @@ +UPDATE fang_tasks SET state = 'retried' , error_message = $1, retries = $2, scheduled_at = $3, updated_at = $4 WHERE id = $5; + +SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $5 ; \ No newline at end of file diff --git a/fang/src/asynk/queries_mysql/update_task_state.sql b/fang/src/asynk/queries_mysql/update_task_state.sql new file mode 100644 index 00000000..eefb3c23 --- /dev/null +++ b/fang/src/asynk/queries_mysql/update_task_state.sql @@ -0,0 +1,5 @@ +BEGIN + +UPDATE fang_tasks SET state = $1 , updated_at = $2 WHERE id = $3; + +END \ No newline at end of file From fa4f1351daf95b7083230d2ebf98d0b6c5196706 Mon Sep 17 00:00:00 2001 From: pxp9 Date: Sat, 2 Sep 2023 09:49:53 +0200 Subject: [PATCH 27/90] rework to make it work with MySQL --- .../up.sql | 16 +- fang/src/asynk/async_queue.rs | 431 +++----- fang/src/asynk/backend_sqlx.rs | 937 +++++++++++++++++- fang/src/asynk/queries_mysql/fail_task.sql | 4 +- .../asynk/queries_mysql/fetch_task_type.sql | 2 +- .../asynk/queries_mysql/find_task_by_id.sql | 2 +- .../queries_mysql/find_task_by_uniq_hash.sql | 2 +- 
fang/src/asynk/queries_mysql/insert_task.sql | 8 +- .../remove_all_scheduled_tasks.sql | 2 +- fang/src/asynk/queries_mysql/remove_task.sql | 2 +- .../queries_mysql/remove_task_by_metadata.sql | 2 +- .../asynk/queries_mysql/remove_tasks_type.sql | 2 +- fang/src/asynk/queries_mysql/retry_task.sql | 4 +- .../asynk/queries_mysql/update_task_state.sql | 6 +- .../asynk/queries_sqlite/find_task_by_id.sql | 2 +- .../queries_sqlite/find_task_by_uniq_hash.sql | 2 +- fang/src/asynk/queries_sqlite/insert_task.sql | 2 +- .../asynk/queries_sqlite/insert_task_uniq.sql | 2 +- fang/src/asynk/queries_sqlite/retry_task.sql | 2 +- .../queries_sqlite/update_task_state.sql | 2 +- fang/src/blocking/mysql_schema.rs | 15 +- fang/src/lib.rs | 2 - 22 files changed, 1062 insertions(+), 387 deletions(-) diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index b882b72b..4cd858d6 100644 --- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -3,17 +3,23 @@ -- docker exec -ti mysql mysql -u root -pfang -P 3360 fang -e "$(catn fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql)" + /* + why `metadata` and `error_message` are not a TEXT ? + MySQL TEXT type, I think it is stored as a BLOB. 
+ So that breaks FromRow trait, implemented in lib.rs line 183 + */ + CREATE TABLE fang_tasks ( id VARCHAR(36) DEFAULT (uuid()) PRIMARY KEY, - metadata JSON NOT NULL, - error_message TEXT, + metadata VARCHAR(2048) NOT NULL, + error_message VARCHAR(2048), state ENUM('new', 'in_progress', 'failed', 'finished', 'retried') NOT NULL DEFAULT 'new', task_type VARCHAR(255) NOT NULL DEFAULT 'common', -- TEXT type can not have default value, stupid MySQL policy uniq_hash CHAR(64), retries INTEGER NOT NULL DEFAULT 0, - scheduled_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + scheduled_at VARCHAR(32) NOT NULL DEFAULT(CONCAT(CURRENT_TIMESTAMP, '.000000000+00')), + created_at VARCHAR(32) NOT NULL DEFAULT (CONCAT(CURRENT_TIMESTAMP , '.000000000+00')), + updated_at VARCHAR(32) NOT NULL DEFAULT (CONCAT(CURRENT_TIMESTAMP , '.000000000+00')) ); CREATE INDEX fang_tasks_state_index ON fang_tasks(state); diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index aeede499..d3701b7b 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -2,6 +2,7 @@ mod async_queue_tests; use crate::asynk::async_runnable::AsyncRunnable; +use crate::backend_sqlx::QueryParams; use crate::backend_sqlx::SqlXQuery; use crate::CronError; use crate::FangTaskState; @@ -10,13 +11,10 @@ use crate::Task; use async_trait::async_trait; use chrono::DateTime; -use chrono::Duration; use chrono::Utc; use cron::Schedule; -use sha2::{Digest, Sha256}; use sqlx::any::install_default_drivers; use sqlx::pool::PoolOptions; -use sqlx::Acquire; use sqlx::Any; use sqlx::AnyPool; use sqlx::Transaction; @@ -334,164 +332,6 @@ impl AsyncQueue { Ok(()) } - async fn remove_all_tasks_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - ) -> Result { - let query = backend.select_query(SqlXQuery::RemoveAllTask); - - Ok(sqlx::query(query) - 
.execute(transaction.acquire().await?) - .await? - .rows_affected()) - } - - async fn remove_all_scheduled_tasks_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - ) -> Result { - let query = backend.select_query(SqlXQuery::RemoveAllScheduledTask); - - let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - Ok(sqlx::query(query) - .bind(now_str) - .execute(transaction.acquire().await?) - .await? - .rows_affected()) - } - - async fn remove_task_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - id: &Uuid, - ) -> Result { - let query = backend.select_query(SqlXQuery::RemoveTask); - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); - - let result = sqlx::query(query) - .bind(&*uuid_as_text) - .execute(transaction.acquire().await?) - .await? - .rows_affected(); - - if result != 1 { - Err(AsyncQueueError::ResultError { - expected: 1, - found: result, - }) - } else { - Ok(result) - } - } - - async fn remove_task_by_metadata_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - task: &dyn AsyncRunnable, - ) -> Result { - let metadata = serde_json::to_value(task)?; - - let uniq_hash = Self::calculate_hash(metadata.to_string()); - - let query = backend.select_query(SqlXQuery::RemoveTaskByMetadata); - - Ok(sqlx::query(query) - .bind(uniq_hash) - .execute(transaction.acquire().await?) - .await? - .rows_affected()) - } - - async fn remove_tasks_type_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - task_type: &str, - ) -> Result { - let query = backend.select_query(SqlXQuery::RemoveTaskType); - - Ok(sqlx::query(query) - .bind(task_type) - .execute(transaction.acquire().await?) - .await? 
- .rows_affected()) - } - - async fn find_task_by_id_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - id: &Uuid, - ) -> Result { - let query = backend.select_query(SqlXQuery::FindTaskById); - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); - - let task: Task = sqlx::query_as(query) - .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) - .await?; - - Ok(task) - } - - async fn fail_task_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - task: &Task, - error_message: &str, - ) -> Result { - let query = backend.select_query(SqlXQuery::FailTask); - - let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = task.id.as_hyphenated().encode_lower(&mut buffer); - - let failed_task: Task = sqlx::query_as(query) - .bind(<&str>::from(FangTaskState::Failed)) - .bind(error_message) - .bind(updated_at) - .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) - .await?; - - Ok(failed_task) - } - - async fn schedule_retry_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - task: &Task, - backoff_seconds: u32, - error: &str, - ) -> Result { - let query = backend.select_query(SqlXQuery::RetryTask); - - let now = Utc::now(); - let now_str = format!("{}", now.format("%F %T%.f+00")); - - let scheduled_at = now + Duration::seconds(backoff_seconds as i64); - let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); - let retries = task.retries + 1; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = task.id.as_hyphenated().encode_lower(&mut buffer); - - let failed_task: Task = sqlx::query_as(query) - .bind(error) - .bind(retries) - .bind(scheduled_at_str) - .bind(now_str) - .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) 
- .await?; - - Ok(failed_task) - } - async fn fetch_and_touch_task_query( transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, @@ -502,132 +342,79 @@ impl AsyncQueue { None => DEFAULT_TASK_TYPE.to_string(), }; - let task = Self::get_task_type_query(transaction, backend, &task_type) + let query_params = QueryParams::builder().task_type(&task_type).build(); + + let task = backend + .execute_query(SqlXQuery::FetchTaskType, transaction, query_params) .await + .map(|val| val.unwrap_task()) .ok(); - println!("{task:?}"); - let result_task = if let Some(some_task) = task { - Some( - Self::update_task_state_query( - transaction, - backend, - &some_task, - FangTaskState::InProgress, - ) - .await?, - ) + let query_params = QueryParams::builder() + .uuid(&some_task.id) + .state(FangTaskState::InProgress) + .build(); + + let task = backend + .execute_query(SqlXQuery::UpdateTaskState, transaction, query_params) + .await? + .unwrap_task(); + + Some(task) } else { None }; Ok(result_task) } - async fn get_task_type_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - task_type: &str, - ) -> Result { - let query = backend.select_query(SqlXQuery::FetchTaskType); - - let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - let task: Task = sqlx::query_as(query) - .bind(task_type) - .bind(now_str) - .fetch_one(transaction.acquire().await?) 
- .await?; - - Ok(task) - } - - async fn update_task_state_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - task: &Task, - state: FangTaskState, - ) -> Result { - let query = backend.select_query(SqlXQuery::UpdateTaskState); - - let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - let state_str: &str = state.into(); - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = task.id.as_hyphenated().encode_lower(&mut buffer); - - let task: Task = sqlx::query_as(query) - .bind(state_str) - .bind(updated_at_str) - .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) - .await?; - - Ok(task) - } - async fn insert_task_query( transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, - metadata: serde_json::Value, + metadata: &serde_json::Value, task_type: &str, - scheduled_at: DateTime, + scheduled_at: &DateTime, ) -> Result { - let query = backend.select_query(SqlXQuery::InsertTask); - - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + let query_params = QueryParams::builder() + .metadata(&metadata) + .task_type(task_type) + .scheduled_at(scheduled_at) + .build(); - let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let task = backend + .execute_query(SqlXQuery::InsertTask, transaction, query_params) + .await? + .unwrap_task(); - let task: Task = sqlx::query_as(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(scheduled_at_str) - .fetch_one(transaction.acquire().await?) 
- .await?; Ok(task) } async fn insert_task_uniq_query( transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, - metadata: serde_json::Value, + metadata: &serde_json::Value, task_type: &str, - scheduled_at: DateTime, + scheduled_at: &DateTime, ) -> Result { - let query = backend.select_query(SqlXQuery::InsertTaskUniq); - - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let uniq_hash = Self::calculate_hash(metadata.to_string()); - - let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let query_params = QueryParams::builder() + .metadata(&metadata) + .task_type(task_type) + .scheduled_at(scheduled_at) + .build(); - let task: Task = sqlx::query_as(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(uniq_hash) - .bind(scheduled_at_str) - .fetch_one(transaction.acquire().await?) - .await?; + let task = backend + .execute_query(SqlXQuery::InsertTaskUniq, transaction, query_params) + .await? 
+ .unwrap_task(); Ok(task) } async fn insert_task_if_not_exist_query( transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, - metadata: serde_json::Value, + metadata: &serde_json::Value, task_type: &str, - scheduled_at: DateTime, + scheduled_at: &DateTime, ) -> Result { match Self::find_task_by_uniq_hash_query(transaction, backend, &metadata).await { Some(task) => Ok(task), @@ -644,27 +431,18 @@ impl AsyncQueue { } } - fn calculate_hash(json: String) -> String { - let mut hasher = Sha256::new(); - hasher.update(json.as_bytes()); - let result = hasher.finalize(); - hex::encode(result) - } - async fn find_task_by_uniq_hash_query( transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, metadata: &serde_json::Value, ) -> Option { - let query = backend.select_query(SqlXQuery::FindTaskByUniqHash); - - let uniq_hash = Self::calculate_hash(metadata.to_string()); + let query_params = QueryParams::builder().metadata(metadata).build(); - sqlx::query_as(query) - .bind(uniq_hash) - .fetch_one(transaction.acquire().await.ok()?) + backend + .execute_query(SqlXQuery::FindTaskByUniqHash, transaction, query_params) .await - .ok() + .ok()? + .unwrap_opt_task() } async fn schedule_task_query( @@ -696,18 +474,18 @@ impl AsyncQueue { Self::insert_task_query( transaction, backend, - metadata, + &metadata, &task.task_type(), - scheduled_at, + &scheduled_at, ) .await? } else { Self::insert_task_if_not_exist_query( transaction, backend, - metadata, + &metadata, &task.task_type(), - scheduled_at, + &scheduled_at, ) .await? }; @@ -721,7 +499,13 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = Self::find_task_by_id_query(&mut transaction, &self.backend, id).await?; + let query_params = QueryParams::builder().uuid(id).build(); + + let task = self + .backend + .execute_query(SqlXQuery::FindTaskById, &mut transaction, query_params) + .await? 
+ .unwrap_task(); transaction.commit().await?; @@ -752,18 +536,18 @@ impl AsyncQueueable for AsyncQueue { Self::insert_task_query( &mut transaction, &self.backend, - metadata, + &metadata, &task.task_type(), - Utc::now(), + &Utc::now(), ) .await? } else { Self::insert_task_if_not_exist_query( &mut transaction, &self.backend, - metadata, + &metadata, &task.task_type(), - Utc::now(), + &Utc::now(), ) .await? }; @@ -788,7 +572,13 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_all_tasks_query(&mut transaction, &self.backend).await?; + let query_params = QueryParams::builder().build(); + + let result = self + .backend + .execute_query(SqlXQuery::RemoveAllTask, &mut transaction, query_params) + .await? + .unwrap_u64(); transaction.commit().await?; @@ -799,8 +589,17 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = - Self::remove_all_scheduled_tasks_query(&mut transaction, &self.backend).await?; + let query_params = QueryParams::builder().build(); + + let result = self + .backend + .execute_query( + SqlXQuery::RemoveAllScheduledTask, + &mut transaction, + query_params, + ) + .await? + .unwrap_u64(); transaction.commit().await?; @@ -811,7 +610,13 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = Self::remove_task_query(&mut transaction, &self.backend, id).await?; + let query_params = QueryParams::builder().uuid(id).build(); + + let result = self + .backend + .execute_query(SqlXQuery::RemoveTask, &mut transaction, query_params) + .await? 
+ .unwrap_u64(); transaction.commit().await?; @@ -826,8 +631,17 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = - Self::remove_task_by_metadata_query(&mut transaction, &self.backend, task).await?; + let query_params = QueryParams::builder().runnable(task).build(); + + let result = self + .backend + .execute_query( + SqlXQuery::RemoveTaskByMetadata, + &mut transaction, + query_params, + ) + .await? + .unwrap_u64(); transaction.commit().await?; @@ -841,8 +655,13 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let result = - Self::remove_tasks_type_query(&mut transaction, &self.backend, task_type).await?; + let query_params = QueryParams::builder().task_type(task_type).build(); + + let result = self + .backend + .execute_query(SqlXQuery::RemoveTaskType, &mut transaction, query_params) + .await? + .unwrap_u64(); transaction.commit().await?; @@ -857,8 +676,13 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = - Self::update_task_state_query(&mut transaction, &self.backend, task, state).await?; + let query_params = QueryParams::builder().uuid(&task.id).state(state).build(); + + let task = self + .backend + .execute_query(SqlXQuery::UpdateTaskState, &mut transaction, query_params) + .await? 
+ .unwrap_task(); transaction.commit().await?; @@ -873,12 +697,20 @@ impl AsyncQueueable for AsyncQueue { self.check_if_connection()?; let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let task = - Self::fail_task_query(&mut transaction, &self.backend, task, error_message).await?; + let query_params = QueryParams::builder() + .error_message(error_message) + .task(task) + .build(); + + let failed_task = self + .backend + .execute_query(SqlXQuery::FailTask, &mut transaction, query_params) + .await? + .unwrap_task(); transaction.commit().await?; - Ok(task) + Ok(failed_task) } async fn schedule_retry( @@ -891,14 +723,17 @@ impl AsyncQueueable for AsyncQueue { let mut transaction = self.pool.as_ref().unwrap().begin().await?; - let failed_task = Self::schedule_retry_query( - &mut transaction, - &self.backend, - task, - backoff_seconds, - error, - ) - .await?; + let query_params = QueryParams::builder() + .backoff_seconds(backoff_seconds) + .error_message(error) + .task(task) + .build(); + + let failed_task = self + .backend + .execute_query(SqlXQuery::RetryTask, &mut transaction, query_params) + .await? 
+ .unwrap_task(); transaction.commit().await?; diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index c0abecb7..e86d42e3 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -1,4 +1,12 @@ +use chrono::DateTime; +use chrono::Duration; +use chrono::Utc; +use sha2::Digest; +use sha2::Sha256; +use sqlx::{Acquire, Any, Transaction}; use std::fmt::Debug; +use typed_builder::TypedBuilder; +use uuid::Uuid; const INSERT_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task.sql"); const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task_uniq.sql"); @@ -61,6 +69,57 @@ pub(crate) enum BackendSqlX { NoBackend, } +#[derive(TypedBuilder, Clone)] +pub(crate) struct QueryParams<'a> { + #[builder(default, setter(strip_option))] + uuid: Option<&'a Uuid>, + #[builder(default, setter(strip_option))] + metadata: Option<&'a serde_json::Value>, + #[builder(default, setter(strip_option))] + task_type: Option<&'a str>, + #[builder(default, setter(strip_option))] + scheduled_at: Option<&'a DateTime>, + #[builder(default, setter(strip_option))] + state: Option, + #[builder(default, setter(strip_option))] + error_message: Option<&'a str>, + #[builder(default, setter(strip_option))] + runnable: Option<&'a dyn AsyncRunnable>, + #[builder(default, setter(strip_option))] + backoff_seconds: Option, + #[builder(default, setter(strip_option))] + task: Option<&'a Task>, +} + +pub(crate) enum Res { + BIGINT(u64), + Task(Task), + OptTask(Option), +} + +impl Res { + pub(crate) fn unwrap_u64(self) -> u64 { + match self { + Res::BIGINT(val) => val, + _ => panic!("Can not unwrap a u64"), + } + } + + pub(crate) fn unwrap_task(self) -> Task { + match self { + Res::Task(task) => task, + _ => panic!("Can not unwrap a task"), + } + } + + pub(crate) fn unwrap_opt_task(self) -> Option { + match self { + Res::OptTask(opt_task) => opt_task, + _ => panic!("Can not unwrap a opt_task"), + } + } +} + impl 
BackendSqlX { pub fn new_with_name(name: &str) -> BackendSqlX { match name { @@ -71,11 +130,18 @@ impl BackendSqlX { } } - pub(crate) fn select_query<'a>(&self, query: SqlXQuery) -> &'a str { + pub(crate) async fn execute_query<'a>( + &self, + query: SqlXQuery, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, + ) -> Result { match self { - BackendSqlX::Pg => BackendSqlXPg::select_query(query), - BackendSqlX::Sqlite => BackendSqlXSQLite::select_query(query), - BackendSqlX::Mysql => BackendSqlXMySQL::select_query(query), + BackendSqlX::Pg => BackendSqlXPg::execute_query(query, transaction, params).await, + BackendSqlX::Sqlite => { + BackendSqlXSQLite::execute_query(query, transaction, params).await + } + BackendSqlX::Mysql => BackendSqlXMySQL::execute_query(query, transaction, params).await, _ => unreachable!(), } } @@ -112,22 +178,127 @@ pub(crate) enum SqlXQuery { struct BackendSqlXPg {} use SqlXQuery as Q; + +use crate::AsyncRunnable; +use crate::FangTaskState; +use crate::{AsyncQueueError, Task}; impl BackendSqlXPg { - fn select_query(query: SqlXQuery) -> &'static str { + async fn execute_query( + query: SqlXQuery, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, + ) -> Result { match query { - Q::InsertTask => INSERT_TASK_QUERY_POSTGRES, - Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_POSTGRES, - Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_POSTGRES, - Q::FailTask => FAIL_TASK_QUERY_POSTGRES, - Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_POSTGRES, - Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, - Q::RemoveTask => REMOVE_TASK_QUERY_POSTGRES, - Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, - Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_POSTGRES, - Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_POSTGRES, - Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, - Q::FindTaskById => FIND_TASK_BY_ID_QUERY_POSTGRES, - Q::RetryTask => RETRY_TASK_QUERY_POSTGRES, + Q::InsertTask => { + 
let task = + general_any_impl_insert_task(INSERT_TASK_QUERY_POSTGRES, transaction, params) + .await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskUniq => { + let task = general_any_impl_insert_task_uniq( + INSERT_TASK_UNIQ_QUERY_POSTGRES, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = general_any_impl_update_task_state( + UPDATE_TASK_STATE_QUERY_POSTGRES, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FailTask => { + let task = + general_any_impl_fail_task(FAIL_TASK_QUERY_POSTGRES, transaction, params) + .await?; + + Ok(Res::Task(task)) + } + Q::RemoveAllTask => { + let affected_rows = + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, transaction) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveAllScheduledTask => { + let affected_rows = general_any_impl_remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + transaction, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTask => { + let affected_rows = + general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, transaction, params) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = general_any_impl_remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + transaction, + params, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = general_any_impl_remove_task_type( + REMOVE_TASKS_TYPE_QUERY_POSTGRES, + transaction, + params, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::FetchTaskType => { + let task = general_any_impl_fetch_task_type( + FETCH_TASK_TYPE_QUERY_POSTGRES, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskByUniqHash => { + let opt_task: Option = general_any_impl_find_task_by_uniq_hash( + FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + transaction, + params, + ) + .await; + + Ok(Res::OptTask(opt_task)) + } + 
Q::FindTaskById => { + let task = general_any_impl_find_task_by_id( + FIND_TASK_BY_ID_QUERY_POSTGRES, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = + general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, transaction, params) + .await?; + + Ok(Res::Task(task)) + } } } @@ -140,21 +311,121 @@ impl BackendSqlXPg { struct BackendSqlXSQLite {} impl BackendSqlXSQLite { - fn select_query(query: SqlXQuery) -> &'static str { + async fn execute_query( + query: SqlXQuery, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, + ) -> Result { match query { - Q::InsertTask => INSERT_TASK_QUERY_SQLITE, - Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_SQLITE, - Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_SQLITE, - Q::FailTask => FAIL_TASK_QUERY_SQLITE, - Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_SQLITE, - Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, - Q::RemoveTask => REMOVE_TASK_QUERY_SQLITE, - Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_SQLITE, - Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_SQLITE, - Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_SQLITE, - Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, - Q::FindTaskById => FIND_TASK_BY_ID_QUERY_SQLITE, - Q::RetryTask => RETRY_TASK_QUERY_SQLITE, + Q::InsertTask => { + let task = + general_any_impl_insert_task(INSERT_TASK_QUERY_SQLITE, transaction, params) + .await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskUniq => { + let task = general_any_impl_insert_task_uniq( + INSERT_TASK_UNIQ_QUERY_SQLITE, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = general_any_impl_update_task_state( + UPDATE_TASK_STATE_QUERY_SQLITE, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FailTask => { + let task = + general_any_impl_fail_task(FAIL_TASK_QUERY_SQLITE, transaction, params).await?; + + Ok(Res::Task(task)) + } + Q::RemoveAllTask => { + let affected_rows = 
+ general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, transaction) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveAllScheduledTask => { + let affected_rows = general_any_impl_remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + transaction, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTask => { + let affected_rows = + general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, transaction, params) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = general_any_impl_remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + transaction, + params, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = general_any_impl_remove_task_type( + REMOVE_TASKS_TYPE_QUERY_SQLITE, + transaction, + params, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::FetchTaskType => { + let task = general_any_impl_fetch_task_type( + FETCH_TASK_TYPE_QUERY_SQLITE, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskByUniqHash => { + let opt_task: Option = general_any_impl_find_task_by_uniq_hash( + FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + transaction, + params, + ) + .await; + + Ok(Res::OptTask(opt_task)) + } + Q::FindTaskById => { + let task = general_any_impl_find_task_by_id( + FIND_TASK_BY_ID_QUERY_SQLITE, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = + general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, transaction, params) + .await?; + + Ok(Res::Task(task)) + } } } @@ -163,25 +434,404 @@ impl BackendSqlXSQLite { } } +async fn general_any_impl_insert_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let scheduled_at_str = format!("{}", 
params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let metadata_str = params.metadata.unwrap().to_string(); + let task_type = params.task_type.unwrap(); + + let task: Task = sqlx::query_as(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(scheduled_at_str) + .fetch_one(transaction.acquire().await?) + .await?; + + Ok(task) +} + +pub(crate) fn calculate_hash(json: &str) -> String { + let mut hasher = Sha256::new(); + hasher.update(json.as_bytes()); + let result = hasher.finalize(); + hex::encode(result) +} + +async fn general_any_impl_insert_task_uniq( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let metadata = params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let task: Task = sqlx::query_as(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at_str) + .fetch_one(transaction.acquire().await?) + .await?; + Ok(task) +} + +async fn general_any_impl_update_task_state( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + let state_str: &str = params.state.unwrap().into(); + + let uuid = params.uuid.unwrap(); + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let task: Task = sqlx::query_as(query) + .bind(state_str) + .bind(updated_at_str) + .bind(&*uuid_as_text) + .fetch_one(transaction.acquire().await?) 
+ .await?; + + Ok(task) +} + +async fn general_any_impl_fail_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + + let id = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + + let error_message = params.error_message.unwrap(); + + let failed_task: Task = sqlx::query_as(query) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(&*uuid_as_text) + .fetch_one(transaction.acquire().await?) + .await?; + + Ok(failed_task) +} + +async fn general_any_impl_remove_all_task( + query: &str, + transaction: &mut Transaction<'_, Any>, +) -> Result { + Ok(sqlx::query(query) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) +} + +async fn general_any_impl_remove_all_scheduled_tasks( + query: &str, + transaction: &mut Transaction<'_, Any>, +) -> Result { + let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + Ok(sqlx::query(query) + .bind(now_str) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) +} + +async fn general_any_impl_remove_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .uuid + .unwrap() + .as_hyphenated() + .encode_lower(&mut buffer); + + let result = sqlx::query(query) + .bind(&*uuid_as_text) + .execute(transaction.acquire().await?) + .await? 
+ .rows_affected(); + + if result != 1 { + Err(AsyncQueueError::ResultError { + expected: 1, + found: result, + }) + } else { + Ok(result) + } +} + +async fn general_any_impl_remove_task_by_metadata( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let metadata = serde_json::to_value(params.runnable.unwrap())?; + + let uniq_hash = calculate_hash(&metadata.to_string()); + + Ok(sqlx::query(query) + .bind(uniq_hash) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) +} + +async fn general_any_impl_remove_task_type( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let task_type = params.task_type.unwrap(); + + Ok(sqlx::query(query) + .bind(task_type) + .execute(transaction.acquire().await?) + .await? + .rows_affected()) +} + +async fn general_any_impl_fetch_task_type( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let task_type = params.task_type.unwrap(); + + let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + let task: Task = sqlx::query_as(query) + .bind(task_type) + .bind(now_str) + .fetch_one(transaction.acquire().await?) + .await?; + + Ok(task) +} + +async fn general_any_impl_find_task_by_uniq_hash( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Option { + let metadata = params.metadata.unwrap(); + + let uniq_hash = calculate_hash(&metadata.to_string()); + + sqlx::query_as(query) + .bind(uniq_hash) + .fetch_one(transaction.acquire().await.ok()?) 
+ .await + .ok() +} + +async fn general_any_impl_find_task_by_id( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .uuid + .unwrap() + .as_hyphenated() + .encode_lower(&mut buffer); + + let task: Task = sqlx::query_as(query) + .bind(&*uuid_as_text) + .fetch_one(transaction.acquire().await?) + .await?; + + Ok(task) +} + +async fn general_any_impl_retry_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let now = Utc::now(); + let now_str = format!("{}", now.format("%F %T%.f+00")); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let retries = params.task.unwrap().retries + 1; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .task + .unwrap() + .id + .as_hyphenated() + .encode_lower(&mut buffer); + + let error = params.error_message.unwrap(); + + let failed_task: Task = sqlx::query_as(query) + .bind(error) + .bind(retries) + .bind(scheduled_at_str) + .bind(now_str) + .bind(&*uuid_as_text) + .fetch_one(transaction.acquire().await?) 
+ .await?; + + Ok(failed_task) +} + #[derive(Debug, Clone)] struct BackendSqlXMySQL {} impl BackendSqlXMySQL { - fn select_query(query: SqlXQuery) -> &'static str { + async fn execute_query( + query: SqlXQuery, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, + ) -> Result { match query { - Q::InsertTask => INSERT_TASK_QUERY_MYSQL, - Q::InsertTaskUniq => INSERT_TASK_UNIQ_QUERY_MYSQL, - Q::UpdateTaskState => UPDATE_TASK_STATE_QUERY_MYSQL, - Q::FailTask => FAIL_TASK_QUERY_MYSQL, - Q::RemoveAllTask => REMOVE_ALL_TASK_QUERY_MYSQL, - Q::RemoveAllScheduledTask => REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, - Q::RemoveTask => REMOVE_TASK_QUERY_MYSQL, - Q::RemoveTaskByMetadata => REMOVE_TASK_BY_METADATA_QUERY_MYSQL, - Q::RemoveTaskType => REMOVE_TASKS_TYPE_QUERY_MYSQL, - Q::FetchTaskType => FETCH_TASK_TYPE_QUERY_MYSQL, - Q::FindTaskByUniqHash => FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, - Q::FindTaskById => FIND_TASK_BY_ID_QUERY_MYSQL, - Q::RetryTask => RETRY_TASK_QUERY_MYSQL, + Q::InsertTask => { + let task = + mysql_impl_insert_task(INSERT_TASK_QUERY_MYSQL, transaction, params).await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskUniq => { + let task = + mysql_impl_insert_task_uniq(INSERT_TASK_UNIQ_QUERY_MYSQL, transaction, params) + .await?; + Ok(Res::Task(task)) + } + + Q::UpdateTaskState => { + let task = mysql_impl_update_task_state( + UPDATE_TASK_STATE_QUERY_MYSQL, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + + Q::FailTask => { + let task = mysql_impl_fail_task(FAIL_TASK_QUERY_MYSQL, transaction, params).await?; + + Ok(Res::Task(task)) + } + + Q::RemoveAllTask => { + let affected_rows = + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, transaction) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + + Q::RemoveAllScheduledTask => { + let affected_rows = general_any_impl_remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, + transaction, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + + Q::RemoveTask 
=> { + let affected_rows = + general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, transaction, params) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = general_any_impl_remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_MYSQL, + transaction, + params, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = general_any_impl_remove_task_type( + REMOVE_TASKS_TYPE_QUERY_MYSQL, + transaction, + params, + ) + .await?; + + Ok(Res::BIGINT(affected_rows)) + } + Q::FetchTaskType => { + let task = general_any_impl_fetch_task_type( + FETCH_TASK_TYPE_QUERY_MYSQL, + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskByUniqHash => { + let opt_task: Option = general_any_impl_find_task_by_uniq_hash( + FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, + transaction, + params, + ) + .await; + + Ok(Res::OptTask(opt_task)) + } + Q::FindTaskById => { + let task: Task = general_any_impl_find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + transaction, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = + mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, transaction, params).await?; + + Ok(Res::Task(task)) + } } } @@ -189,3 +839,198 @@ impl BackendSqlXMySQL { "MySQL" } } + +async fn mysql_impl_insert_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let metadata_str = params.metadata.unwrap().to_string(); + let task_type = params.task_type.unwrap(); + + let affected_rows = sqlx::query(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(scheduled_at_str) + .execute(transaction.acquire().await?) + .await? 
+ .rows_affected(); + + if affected_rows != 1 { + // here we should return an error + panic!("fock") + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) + .await?; + + Ok(task) +} + +async fn mysql_impl_insert_task_uniq( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let metadata = params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let affected_rows = sqlx::query(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at_str) + .execute(transaction.acquire().await?) + .await? + .rows_affected(); + + if affected_rows != 1 { + // here we should return an error + panic!("fock") + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) + .await?; + + Ok(task) +} + +async fn mysql_impl_update_task_state( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + let state_str: &str = params.state.unwrap().into(); + + let uuid = params.uuid.unwrap(); + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let affected_rows = sqlx::query(query) + .bind(state_str) + .bind(updated_at_str) + .bind(&*uuid_as_text) + .execute(transaction.acquire().await?) + .await? 
+ .rows_affected(); + + if affected_rows != 1 { + // here we should return an error + panic!("fock") + } + + let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); + + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) + .await?; + + Ok(task) +} + +async fn mysql_impl_fail_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + + let id = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + + let error_message = params.error_message.unwrap(); + + let affected_rows = sqlx::query(query) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(&*uuid_as_text) + .execute(transaction.acquire().await?) + .await? + .rows_affected(); + + if affected_rows != 1 { + // here we should return an error + panic!("fock") + } + + let query_params = QueryParams::builder().uuid(&id).build(); + + let failed_task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) + .await?; + + Ok(failed_task) +} + +async fn mysql_impl_retry_task( + query: &str, + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + let now = Utc::now(); + let now_str = format!("{}", now.format("%F %T%.f+00")); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let retries = params.task.unwrap().retries + 1; + + let uuid = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let error = params.error_message.unwrap(); + + let affected_rows = sqlx::query(query) + .bind(error) + .bind(retries) + .bind(scheduled_at_str) + 
.bind(now_str) + .bind(&*uuid_as_text) + .execute(transaction.acquire().await?) + .await? + .rows_affected(); + + if affected_rows != 1 { + // here we should return an error + panic!("fock") + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let failed_task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) + .await?; + + Ok(failed_task) +} diff --git a/fang/src/asynk/queries_mysql/fail_task.sql b/fang/src/asynk/queries_mysql/fail_task.sql index b89d13d1..481c27d3 100644 --- a/fang/src/asynk/queries_mysql/fail_task.sql +++ b/fang/src/asynk/queries_mysql/fail_task.sql @@ -1,3 +1 @@ -UPDATE fang_tasks SET state = $1 , error_message = $2 , updated_at = $3 WHERE id = $4 ; - -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $4 ; +UPDATE fang_tasks SET state = ? , error_message = ? , updated_at = ? WHERE id = ? ; diff --git a/fang/src/asynk/queries_mysql/fetch_task_type.sql b/fang/src/asynk/queries_mysql/fetch_task_type.sql index 02c3f9f4..51929152 100644 --- a/fang/src/asynk/queries_mysql/fetch_task_type.sql +++ b/fang/src/asynk/queries_mysql/fetch_task_type.sql @@ -1 +1 @@ -SELECT id , metadata , error_message, state, task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 +SELECT * FROM fang_tasks WHERE task_type = ? AND state in ('new', 'retried') AND ? 
>= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_mysql/find_task_by_id.sql b/fang/src/asynk/queries_mysql/find_task_by_id.sql index 60b4cf93..234cf1f5 100644 --- a/fang/src/asynk/queries_mysql/find_task_by_id.sql +++ b/fang/src/asynk/queries_mysql/find_task_by_id.sql @@ -1 +1 @@ -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1 +SELECT * FROM fang_tasks WHERE id = ?; diff --git a/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql index d12443ad..9250db9a 100644 --- a/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql +++ b/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql @@ -1 +1 @@ -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 +SELECT * FROM fang_tasks WHERE uniq_hash = ? 
AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_mysql/insert_task.sql b/fang/src/asynk/queries_mysql/insert_task.sql index 6f0c2bba..1045f2f9 100644 --- a/fang/src/asynk/queries_mysql/insert_task.sql +++ b/fang/src/asynk/queries_mysql/insert_task.sql @@ -1,7 +1 @@ -BEGIN - -INSERT INTO fang_tasks (id, metadata, task_type, scheduled_at) VALUES (?, ?, ?, ?); - -SELECT * FROM fang_tasks WHERE id = 'uuid'; - -END \ No newline at end of file +INSERT INTO fang_tasks (id, metadata, task_type, scheduled_at) VALUES (?, ?, ?, ?); \ No newline at end of file diff --git a/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql index db72fce8..80e20846 100644 --- a/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql +++ b/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql @@ -1 +1 @@ -DELETE FROM fang_tasks WHERE scheduled_at > $1 +DELETE FROM fang_tasks WHERE scheduled_at > ? diff --git a/fang/src/asynk/queries_mysql/remove_task.sql b/fang/src/asynk/queries_mysql/remove_task.sql index 4a384bd7..2cc4ddc2 100644 --- a/fang/src/asynk/queries_mysql/remove_task.sql +++ b/fang/src/asynk/queries_mysql/remove_task.sql @@ -1 +1 @@ -DELETE FROM fang_tasks WHERE id = $1 +DELETE FROM fang_tasks WHERE id = ? diff --git a/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql index 85cb4eea..966ab747 100644 --- a/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql +++ b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql @@ -1 +1 @@ -DELETE FROM fang_tasks WHERE uniq_hash = $1 +DELETE FROM fang_tasks WHERE uniq_hash = ? 
diff --git a/fang/src/asynk/queries_mysql/remove_tasks_type.sql b/fang/src/asynk/queries_mysql/remove_tasks_type.sql index a12477fc..a415d20a 100644 --- a/fang/src/asynk/queries_mysql/remove_tasks_type.sql +++ b/fang/src/asynk/queries_mysql/remove_tasks_type.sql @@ -1 +1 @@ -DELETE FROM fang_tasks WHERE task_type = $1 +DELETE FROM fang_tasks WHERE task_type = ? diff --git a/fang/src/asynk/queries_mysql/retry_task.sql b/fang/src/asynk/queries_mysql/retry_task.sql index 4be5ea8f..b0481720 100644 --- a/fang/src/asynk/queries_mysql/retry_task.sql +++ b/fang/src/asynk/queries_mysql/retry_task.sql @@ -1,3 +1 @@ -UPDATE fang_tasks SET state = 'retried' , error_message = $1, retries = $2, scheduled_at = $3, updated_at = $4 WHERE id = $5; - -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $5 ; \ No newline at end of file +UPDATE fang_tasks SET state = 'retried' , error_message = ?, retries = ?, scheduled_at = ?, updated_at = ? WHERE id = ?; \ No newline at end of file diff --git a/fang/src/asynk/queries_mysql/update_task_state.sql b/fang/src/asynk/queries_mysql/update_task_state.sql index eefb3c23..237da09f 100644 --- a/fang/src/asynk/queries_mysql/update_task_state.sql +++ b/fang/src/asynk/queries_mysql/update_task_state.sql @@ -1,5 +1 @@ -BEGIN - -UPDATE fang_tasks SET state = $1 , updated_at = $2 WHERE id = $3; - -END \ No newline at end of file +UPDATE fang_tasks SET state = ? , updated_at = ? 
WHERE id = ?; \ No newline at end of file diff --git a/fang/src/asynk/queries_sqlite/find_task_by_id.sql b/fang/src/asynk/queries_sqlite/find_task_by_id.sql index 60b4cf93..608166f5 100644 --- a/fang/src/asynk/queries_sqlite/find_task_by_id.sql +++ b/fang/src/asynk/queries_sqlite/find_task_by_id.sql @@ -1 +1 @@ -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1 +SELECT * FROM fang_tasks WHERE id = $1 diff --git a/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql index d12443ad..cb53f45c 100644 --- a/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql +++ b/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql @@ -1 +1 @@ -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 +SELECT * FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_sqlite/insert_task.sql b/fang/src/asynk/queries_sqlite/insert_task.sql index 9cca503e..f188b0d8 100644 --- a/fang/src/asynk/queries_sqlite/insert_task.sql +++ b/fang/src/asynk/queries_sqlite/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3, $4 ) RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3, $4 ) RETURNING * diff --git a/fang/src/asynk/queries_sqlite/insert_task_uniq.sql b/fang/src/asynk/queries_sqlite/insert_task_uniq.sql index 9ffc4499..f21dc2a5 100644 --- a/fang/src/asynk/queries_sqlite/insert_task_uniq.sql +++ b/fang/src/asynk/queries_sqlite/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", 
"task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2 , $3, $4, $5 ) RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2 , $3, $4, $5 ) RETURNING * diff --git a/fang/src/asynk/queries_sqlite/retry_task.sql b/fang/src/asynk/queries_sqlite/retry_task.sql index ae0d95b1..f26267cd 100644 --- a/fang/src/asynk/queries_sqlite/retry_task.sql +++ b/fang/src/asynk/queries_sqlite/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5 RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5 RETURNING * diff --git a/fang/src/asynk/queries_sqlite/update_task_state.sql b/fang/src/asynk/queries_sqlite/update_task_state.sql index a24fddf8..a796e7db 100644 --- a/fang/src/asynk/queries_sqlite/update_task_state.sql +++ b/fang/src/asynk/queries_sqlite/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1 , "updated_at" = $2 WHERE id = $3 RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +UPDATE "fang_tasks" SET "state" = $1 , "updated_at" = $2 WHERE id = $3 RETURNING * \ No newline at end of file diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs index 4b98594f..8445e156 100644 --- a/fang/src/blocking/mysql_schema.rs +++ b/fang/src/blocking/mysql_schema.rs @@ -13,8 +13,10 @@ diesel::table! 
{ fang_tasks (id) { #[max_length = 36] id -> Varchar, - metadata -> Json, - error_message -> Nullable, + #[max_length = 2048] + metadata -> Varchar, + #[max_length = 2048] + error_message -> Nullable, #[max_length = 11] state -> FangTasksStateEnum, #[max_length = 255] @@ -22,8 +24,11 @@ diesel::table! { #[max_length = 64] uniq_hash -> Nullable, retries -> Integer, - scheduled_at -> Timestamp, - created_at -> Timestamp, - updated_at -> Timestamp, + #[max_length = 32] + scheduled_at -> Varchar, + #[max_length = 32] + created_at -> Varchar, + #[max_length = 32] + updated_at -> Varchar, } } diff --git a/fang/src/lib.rs b/fang/src/lib.rs index 1db8e4f0..4a382173 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -210,8 +210,6 @@ impl<'a> FromRow<'a, AnyRow> for Task { let scheduled_at_str: &str = row.get("scheduled_at"); - println!("{}", scheduled_at_str); - let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") .unwrap() .into(); From 16e94ef60ce90c8e9b26576150815c9ba8d7615d Mon Sep 17 00:00:00 2001 From: pxp9 Date: Sat, 2 Sep 2023 12:09:51 +0200 Subject: [PATCH 28/90] debugging issue : the issue is related to how a uniq task is inserted in MySQL backend --- .../up.sql | 2 +- fang/src/asynk/async_queue.rs | 43 +----- fang/src/asynk/backend_sqlx.rs | 136 +++++++++--------- .../asynk/queries_mysql/insert_task_uniq.sql | 6 +- .../queries_mysql/remove_task_by_metadata.sql | 2 +- fang/src/blocking/mysql_schema.rs | 2 +- 6 files changed, 78 insertions(+), 113 deletions(-) diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 4cd858d6..4fd52060 100644 --- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -15,7 +15,7 @@ CREATE TABLE fang_tasks ( error_message VARCHAR(2048), state ENUM('new', 'in_progress', 
'failed', 'finished', 'retried') NOT NULL DEFAULT 'new', task_type VARCHAR(255) NOT NULL DEFAULT 'common', -- TEXT type can not have default value, stupid MySQL policy - uniq_hash CHAR(64), + uniq_hash VARCHAR(64), retries INTEGER NOT NULL DEFAULT 0, scheduled_at VARCHAR(32) NOT NULL DEFAULT(CONCAT(CURRENT_TIMESTAMP, '.000000000+00')), created_at VARCHAR(32) NOT NULL DEFAULT (CONCAT(CURRENT_TIMESTAMP , '.000000000+00')), diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index d3701b7b..472eddc5 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -389,7 +389,7 @@ impl AsyncQueue { Ok(task) } - async fn insert_task_uniq_query( + async fn insert_task_if_not_exist_query( transaction: &mut Transaction<'_, Any>, backend: &BackendSqlX, metadata: &serde_json::Value, @@ -397,52 +397,17 @@ impl AsyncQueue { scheduled_at: &DateTime, ) -> Result { let query_params = QueryParams::builder() - .metadata(&metadata) + .metadata(metadata) .task_type(task_type) .scheduled_at(scheduled_at) .build(); let task = backend - .execute_query(SqlXQuery::InsertTaskUniq, transaction, query_params) + .execute_query(SqlXQuery::InsertTaskIfNotExists, transaction, query_params) .await? 
.unwrap_task(); - Ok(task) - } - async fn insert_task_if_not_exist_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - metadata: &serde_json::Value, - task_type: &str, - scheduled_at: &DateTime, - ) -> Result { - match Self::find_task_by_uniq_hash_query(transaction, backend, &metadata).await { - Some(task) => Ok(task), - None => { - Self::insert_task_uniq_query( - transaction, - backend, - metadata, - task_type, - scheduled_at, - ) - .await - } - } - } - - async fn find_task_by_uniq_hash_query( - transaction: &mut Transaction<'_, Any>, - backend: &BackendSqlX, - metadata: &serde_json::Value, - ) -> Option { - let query_params = QueryParams::builder().metadata(metadata).build(); - - backend - .execute_query(SqlXQuery::FindTaskByUniqHash, transaction, query_params) - .await - .ok()? - .unwrap_opt_task() + Ok(task) } async fn schedule_task_query( diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index e86d42e3..06dabd2a 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -94,7 +94,6 @@ pub(crate) struct QueryParams<'a> { pub(crate) enum Res { BIGINT(u64), Task(Task), - OptTask(Option), } impl Res { @@ -111,13 +110,6 @@ impl Res { _ => panic!("Can not unwrap a task"), } } - - pub(crate) fn unwrap_opt_task(self) -> Option { - match self { - Res::OptTask(opt_task) => opt_task, - _ => panic!("Can not unwrap a opt_task"), - } - } } impl BackendSqlX { @@ -160,7 +152,6 @@ impl BackendSqlX { #[derive(Debug, Clone)] pub(crate) enum SqlXQuery { InsertTask, - InsertTaskUniq, UpdateTaskState, FailTask, RemoveAllTask, @@ -169,9 +160,9 @@ pub(crate) enum SqlXQuery { RemoveTaskByMetadata, RemoveTaskType, FetchTaskType, - FindTaskByUniqHash, FindTaskById, RetryTask, + InsertTaskIfNotExists, } #[derive(Debug, Clone)] @@ -196,15 +187,6 @@ impl BackendSqlXPg { Ok(Res::Task(task)) } - Q::InsertTaskUniq => { - let task = general_any_impl_insert_task_uniq( - INSERT_TASK_UNIQ_QUERY_POSTGRES, - transaction, - 
params, - ) - .await?; - Ok(Res::Task(task)) - } Q::UpdateTaskState => { let task = general_any_impl_update_task_state( UPDATE_TASK_STATE_QUERY_POSTGRES, @@ -273,16 +255,6 @@ impl BackendSqlXPg { .await?; Ok(Res::Task(task)) } - Q::FindTaskByUniqHash => { - let opt_task: Option = general_any_impl_find_task_by_uniq_hash( - FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, - transaction, - params, - ) - .await; - - Ok(Res::OptTask(opt_task)) - } Q::FindTaskById => { let task = general_any_impl_find_task_by_id( FIND_TASK_BY_ID_QUERY_POSTGRES, @@ -297,6 +269,19 @@ impl BackendSqlXPg { general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, transaction, params) .await?; + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = general_any_impl_insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + INSERT_TASK_UNIQ_QUERY_POSTGRES, + ), + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) } } @@ -324,15 +309,6 @@ impl BackendSqlXSQLite { Ok(Res::Task(task)) } - Q::InsertTaskUniq => { - let task = general_any_impl_insert_task_uniq( - INSERT_TASK_UNIQ_QUERY_SQLITE, - transaction, - params, - ) - .await?; - Ok(Res::Task(task)) - } Q::UpdateTaskState => { let task = general_any_impl_update_task_state( UPDATE_TASK_STATE_QUERY_SQLITE, @@ -400,16 +376,6 @@ impl BackendSqlXSQLite { .await?; Ok(Res::Task(task)) } - Q::FindTaskByUniqHash => { - let opt_task: Option = general_any_impl_find_task_by_uniq_hash( - FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, - transaction, - params, - ) - .await; - - Ok(Res::OptTask(opt_task)) - } Q::FindTaskById => { let task = general_any_impl_find_task_by_id( FIND_TASK_BY_ID_QUERY_SQLITE, @@ -424,6 +390,19 @@ impl BackendSqlXSQLite { general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, transaction, params) .await?; + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = general_any_impl_insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + INSERT_TASK_UNIQ_QUERY_SQLITE, + ), + transaction, + 
params, + ) + .await?; + Ok(Res::Task(task)) } } @@ -434,6 +413,17 @@ impl BackendSqlXSQLite { } } +async fn general_any_impl_insert_task_if_not_exists( + queries: (&str, &str), + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + match general_any_impl_find_task_by_uniq_hash(queries.0, transaction, ¶ms).await { + Some(task) => Ok(task), + None => general_any_impl_insert_task_uniq(queries.1, transaction, params).await, + } +} + async fn general_any_impl_insert_task( query: &str, transaction: &mut Transaction<'_, Any>, @@ -604,6 +594,8 @@ async fn general_any_impl_remove_task_by_metadata( let uniq_hash = calculate_hash(&metadata.to_string()); + println!("{query}"); + Ok(sqlx::query(query) .bind(uniq_hash) .execute(transaction.acquire().await?) @@ -646,7 +638,7 @@ async fn general_any_impl_fetch_task_type( async fn general_any_impl_find_task_by_uniq_hash( query: &str, transaction: &mut Transaction<'_, Any>, - params: QueryParams<'_>, + params: &QueryParams<'_>, ) -> Option { let metadata = params.metadata.unwrap(); @@ -729,13 +721,6 @@ impl BackendSqlXMySQL { Ok(Res::Task(task)) } - Q::InsertTaskUniq => { - let task = - mysql_impl_insert_task_uniq(INSERT_TASK_UNIQ_QUERY_MYSQL, transaction, params) - .await?; - Ok(Res::Task(task)) - } - Q::UpdateTaskState => { let task = mysql_impl_update_task_state( UPDATE_TASK_STATE_QUERY_MYSQL, @@ -806,16 +791,6 @@ impl BackendSqlXMySQL { .await?; Ok(Res::Task(task)) } - Q::FindTaskByUniqHash => { - let opt_task: Option = general_any_impl_find_task_by_uniq_hash( - FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, - transaction, - params, - ) - .await; - - Ok(Res::OptTask(opt_task)) - } Q::FindTaskById => { let task: Task = general_any_impl_find_task_by_id( FIND_TASK_BY_ID_QUERY_MYSQL, @@ -830,6 +805,19 @@ impl BackendSqlXMySQL { let task = mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, transaction, params).await?; + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = 
mysql_any_impl_insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, + INSERT_TASK_UNIQ_QUERY_MYSQL, + ), + transaction, + params, + ) + .await?; + Ok(Res::Task(task)) } } @@ -895,6 +883,10 @@ async fn mysql_impl_insert_task_uniq( let uniq_hash = calculate_hash(&metadata_str); + println!("{} len : {}", uniq_hash, uniq_hash.len()); + + println!("reach here"); + let affected_rows = sqlx::query(query) .bind(uuid_as_str) .bind(metadata_str) @@ -904,6 +896,7 @@ async fn mysql_impl_insert_task_uniq( .execute(transaction.acquire().await?) .await? .rows_affected(); + println!("reach here 2"); if affected_rows != 1 { // here we should return an error @@ -1034,3 +1027,14 @@ async fn mysql_impl_retry_task( Ok(failed_task) } + +async fn mysql_any_impl_insert_task_if_not_exists( + queries: (&str, &str), + transaction: &mut Transaction<'_, Any>, + params: QueryParams<'_>, +) -> Result { + match general_any_impl_find_task_by_uniq_hash(queries.0, transaction, ¶ms).await { + Some(task) => Ok(task), + None => mysql_impl_insert_task_uniq(queries.1, transaction, params).await, + } +} diff --git a/fang/src/asynk/queries_mysql/insert_task_uniq.sql b/fang/src/asynk/queries_mysql/insert_task_uniq.sql index dbbe6d73..000a3d7e 100644 --- a/fang/src/asynk/queries_mysql/insert_task_uniq.sql +++ b/fang/src/asynk/queries_mysql/insert_task_uniq.sql @@ -1,5 +1 @@ -INSERT INTO fang_tasks ( id , metadata, task_type , uniq_hash, scheduled_at) -VALUES ($1, $2 , $3, $4, $5 ) ; - - -SELECT id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1 ; +INSERT INTO fang_tasks(id,metadata,task_type,uniq_hash,scheduled_at) VALUES (?, ? , ?, ?, ?) 
; diff --git a/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql index 966ab747..f8474e89 100644 --- a/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql +++ b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql @@ -1 +1 @@ -DELETE FROM fang_tasks WHERE uniq_hash = ? +DELETE FROM fang_tasks WHERE uniq_hash = ? ; diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs index 8445e156..d00b1a4f 100644 --- a/fang/src/blocking/mysql_schema.rs +++ b/fang/src/blocking/mysql_schema.rs @@ -22,7 +22,7 @@ diesel::table! { #[max_length = 255] task_type -> Varchar, #[max_length = 64] - uniq_hash -> Nullable, + uniq_hash -> Nullable, retries -> Integer, #[max_length = 32] scheduled_at -> Varchar, From eaa7bc02beb3a226e9092e866741a81dbd888ea8 Mon Sep 17 00:00:00 2001 From: pxp9 Date: Sat, 2 Sep 2023 12:15:50 +0200 Subject: [PATCH 29/90] MySQL own mutex , why Mysql is so gae ? --- fang/src/asynk/async_queue.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 472eddc5..f6d2ab98 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -158,6 +158,9 @@ static ASYNC_QUEUE_POSTGRES_TEST_COUNTER: Mutex = Mutex::const_new(0); #[cfg(test)] static ASYNC_QUEUE_SQLITE_TEST_COUNTER: Mutex = Mutex::const_new(0); +#[cfg(test)] +static ASYNC_QUEUE_MYSQL_TEST_COUNTER: Mutex = Mutex::const_new(0); + #[cfg(test)] use sqlx::Executor; @@ -260,7 +263,7 @@ impl AsyncQueue { .uri(format!("{}/{}", base_url, base_db)) .build(); - let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; + let mut new_number = ASYNC_QUEUE_MYSQL_TEST_COUNTER.lock().await; res.connect().await.unwrap(); let db_name = format!("async_queue_test_{}", *new_number); From f3a14d4f1708fe80ef2b955518d08e2d99e3daab Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 16:20:56 +0300 Subject: 
[PATCH 30/90] fix clippy --- fang/src/asynk/async_queue.rs | 2 +- fang/src/asynk/backend_sqlx.rs | 34 +++++++++++++++++----------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index f6d2ab98..2e0c8aa4 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -379,7 +379,7 @@ impl AsyncQueue { scheduled_at: &DateTime, ) -> Result { let query_params = QueryParams::builder() - .metadata(&metadata) + .metadata(metadata) .task_type(task_type) .scheduled_at(scheduled_at) .build(); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 06dabd2a..55fe5dce 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -92,14 +92,14 @@ pub(crate) struct QueryParams<'a> { } pub(crate) enum Res { - BIGINT(u64), + Bigint(u64), Task(Task), } impl Res { pub(crate) fn unwrap_u64(self) -> u64 { match self { - Res::BIGINT(val) => val, + Res::Bigint(val) => val, _ => panic!("Can not unwrap a u64"), } } @@ -208,7 +208,7 @@ impl BackendSqlXPg { general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, transaction) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveAllScheduledTask => { let affected_rows = general_any_impl_remove_all_scheduled_tasks( @@ -217,14 +217,14 @@ impl BackendSqlXPg { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTask => { let affected_rows = general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, transaction, params) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskByMetadata => { let affected_rows = general_any_impl_remove_task_by_metadata( @@ -234,7 +234,7 @@ impl BackendSqlXPg { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskType => { let affected_rows = general_any_impl_remove_task_type( @@ -244,7 +244,7 @@ impl BackendSqlXPg { ) 
.await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { let task = general_any_impl_fetch_task_type( @@ -329,7 +329,7 @@ impl BackendSqlXSQLite { general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, transaction) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveAllScheduledTask => { let affected_rows = general_any_impl_remove_all_scheduled_tasks( @@ -338,14 +338,14 @@ impl BackendSqlXSQLite { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTask => { let affected_rows = general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, transaction, params) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskByMetadata => { let affected_rows = general_any_impl_remove_task_by_metadata( @@ -355,7 +355,7 @@ impl BackendSqlXSQLite { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskType => { let affected_rows = general_any_impl_remove_task_type( @@ -365,7 +365,7 @@ impl BackendSqlXSQLite { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { let task = general_any_impl_fetch_task_type( @@ -742,7 +742,7 @@ impl BackendSqlXMySQL { general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, transaction) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveAllScheduledTask => { @@ -752,7 +752,7 @@ impl BackendSqlXMySQL { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTask => { @@ -760,7 +760,7 @@ impl BackendSqlXMySQL { general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, transaction, params) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskByMetadata => { let affected_rows = general_any_impl_remove_task_by_metadata( @@ -770,7 +770,7 @@ impl BackendSqlXMySQL { ) .await?; - Ok(Res::BIGINT(affected_rows)) + 
Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskType => { let affected_rows = general_any_impl_remove_task_type( @@ -780,7 +780,7 @@ impl BackendSqlXMySQL { ) .await?; - Ok(Res::BIGINT(affected_rows)) + Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { let task = general_any_impl_fetch_task_type( From c57b0ba2d94235b02c5c01f04629e6b6a2849cdf Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 16:38:23 +0300 Subject: [PATCH 31/90] split test jobs --- .github/workflows/rust.yml | 82 ++++++++++++++++++++++++++++---------- 1 file changed, 61 insertions(+), 21 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 06548127..760ebcdc 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -11,8 +11,29 @@ env : DATABASE_URL : postgres://postgres:postgres@localhost/fang jobs: - test: - name: Test + clippy: + name: Clippy + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + uses: actions-rs/toolchain@v1 + with: + components: clippy + override: true + profile: minimal + toolchain: stable + + - name: Run clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --verbose --all-targets --all-features -- -D warnings + + test_postgres: + name: Test postgres runs-on: ubuntu-latest services: @@ -46,14 +67,45 @@ jobs: profile: minimal toolchain: stable - - name: Run clippy + - name: Install diesel-cli uses: actions-rs/cargo@v1 with: - command: clippy - args: --verbose --all-targets --all-features -- -D warnings + command: install + args: diesel_cli --no-default-features --features postgres + + - name: Setup Postgres db + working-directory: ./fang/postgres_migrations + run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" + + - name: Run tests + uses: actions-rs/cargo@v1 + with: + command: test + args: --verbose --features asynk-postgres + + - name: Run dirty tests + uses: actions-rs/cargo@v1 + with: + command: test + args: --verbose 
features asynk-postgres -- --ignored + + test_sqlite: + name: Test + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + uses: actions-rs/toolchain@v1 + with: + components: clippy + override: true + profile: minimal + toolchain: stable - name: Install sqlite3 - run: | + run: | sudo apt install -y sqlite3 sqlite3 fang.db "VACUUM;" mkdir tests_sqlite @@ -62,35 +114,23 @@ jobs: uses: actions-rs/cargo@v1 with: command: install - args: diesel_cli --no-default-features --features "postgres sqlite mysql" + args: diesel_cli --no-default-features --features sqlite - name: Setup Sqlite db working-directory: ./fang/sqlite_migrations run: diesel setup --database-url "sqlite3://../../../fang.db" - - name: Change working dir - working-directory: ./../.. - run: pwd - - - name: Setup Postgres db - working-directory: ./fang/postgres_migrations - run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" - - - name: Change working dir - working-directory: ./../.. 
- run: pwd - - name: Run tests uses: actions-rs/cargo@v1 with: command: test - args: --verbose --all-features + args: --verbose --features asynk-sqlite - name: Run dirty tests uses: actions-rs/cargo@v1 with: command: test - args: --verbose --all-features -- --ignored + args: --verbose --features asynk-sqlite -- --ignored release: name: Release x86_64-unknown-linux-gnu From b5deaeda777d38d3eaae3941993c82629f504e2f Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 16:44:34 +0300 Subject: [PATCH 32/90] fix build --- .github/workflows/rust.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 760ebcdc..47bc50e0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -7,8 +7,6 @@ on: # Check if it works with current dependencies (weekly on Wednesday 2:32 UTC) - cron: '32 2 * * 3' -env : - DATABASE_URL : postgres://postgres:postgres@localhost/fang jobs: clippy: @@ -35,6 +33,8 @@ jobs: test_postgres: name: Test postgres runs-on: ubuntu-latest + env: + DATABASE_URL: postgres://postgres:postgres@localhost/fang services: # Label used to access the service container @@ -90,7 +90,7 @@ jobs: args: --verbose features asynk-postgres -- --ignored test_sqlite: - name: Test + name: Test sqlite runs-on: ubuntu-latest steps: @@ -135,7 +135,6 @@ jobs: release: name: Release x86_64-unknown-linux-gnu runs-on: ubuntu-latest - needs: test steps: - uses: actions/checkout@v3 From 9ffbf516a5c71070fd32a54b7360b7a8a01b0662 Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 16:55:14 +0300 Subject: [PATCH 33/90] fix github actions --- .github/workflows/rust.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 47bc50e0..4159adb0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -87,7 +87,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: 
--verbose features asynk-postgres -- --ignored + args: asynk::async_queue::postgres --verbose features asynk-postgres -- --ignored test_sqlite: name: Test sqlite @@ -130,7 +130,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --verbose --features asynk-sqlite -- --ignored + args: asynk::async_queue::sqlite --verbose --features asynk-sqlite -- --ignored release: name: Release x86_64-unknown-linux-gnu From f8a87fdc69d3d4d99a94732d40dbc27ff8e9af24 Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 16:57:39 +0300 Subject: [PATCH 34/90] do not run github actions twice --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4159adb0..31792873 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -3,6 +3,7 @@ name: Test and Build Rust on: push: pull_request: + types: [opened, reopened] schedule: # Check if it works with current dependencies (weekly on Wednesday 2:32 UTC) - cron: '32 2 * * 3' From 5592b09cad7c2f1f8f0f2db6c6f8863270c97c6b Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 16:58:19 +0300 Subject: [PATCH 35/90] fix formatting build --- .github/workflows/style.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 1332e2b7..d7e207c6 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -3,6 +3,7 @@ name: Rust Code Formatting on: push: pull_request: + types: [opened, reopened] jobs: rustfmt: From 1bbe5ef33593a3dda1ba8f3aab8ea5cb6cf9b02c Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 8 Sep 2023 17:07:52 +0300 Subject: [PATCH 36/90] another approach --- .github/workflows/rust.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 31792873..a324d002 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -87,8 
+87,8 @@ jobs: - name: Run dirty tests uses: actions-rs/cargo@v1 with: - command: test - args: asynk::async_queue::postgres --verbose features asynk-postgres -- --ignored + command: test "asynk::async_queue::postgres" + args: --verbose features asynk-postgres -- --ignored test_sqlite: name: Test sqlite @@ -130,8 +130,8 @@ jobs: - name: Run dirty tests uses: actions-rs/cargo@v1 with: - command: test - args: asynk::async_queue::sqlite --verbose --features asynk-sqlite -- --ignored + command: test "asynk::async_queue::sqlite" + args: --verbose --features asynk-sqlite -- --ignored release: name: Release x86_64-unknown-linux-gnu From 0501322bb4e65b9005af946ebc54960c53fb4e13 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 21:40:07 +0200 Subject: [PATCH 37/90] tokio console debug --- fang/Cargo.toml | 4 ++-- fang/src/asynk/async_queue/async_queue_tests.rs | 1 + fang/src/asynk/backend_sqlx.rs | 13 ++++++++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 65fdb66f..7c9f01f0 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -36,7 +36,7 @@ migrations = ["dep:diesel_migrations"] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio", "postgres", "sqlite", "mysql"]} - +console-subscriber = "0.2.0" [dependencies] cron = "0.12" @@ -73,7 +73,7 @@ optional = true [dependencies.tokio] version = "1.25" -features = ["rt", "time", "macros"] +features = ["rt", "time", "macros", "tracing"] optional = true [dependencies.async-trait] diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index 87df1c1c..ced658db 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -280,6 +280,7 @@ 
macro_rules! test_asynk_queue { #[tokio::test] async fn remove_tasks_by_metadata() { + console_subscriber::init(); let mut test: $q = $e.await; let task = test diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 55fe5dce..310c7f0e 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -596,9 +596,13 @@ async fn general_any_impl_remove_task_by_metadata( println!("{query}"); + let adquire = transaction.acquire().await?; + + println!("Adquire {:?}", adquire); + Ok(sqlx::query(query) .bind(uniq_hash) - .execute(transaction.acquire().await?) + .execute(adquire) .await? .rows_affected()) } @@ -887,16 +891,19 @@ async fn mysql_impl_insert_task_uniq( println!("reach here"); + let adquire = transaction.acquire().await?; + println!("reach here 2"); + let affected_rows = sqlx::query(query) .bind(uuid_as_str) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) .bind(scheduled_at_str) - .execute(transaction.acquire().await?) + .execute(adquire) .await? 
.rows_affected(); - println!("reach here 2"); + println!("reach here 3"); if affected_rows != 1 { // here we should return an error From 56c2dc9a37b3532331b1ba367f6ed14cddd8847b Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 21:48:12 +0200 Subject: [PATCH 38/90] fixing workflow --- .github/workflows/rust.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a324d002..9b115f58 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -81,7 +81,7 @@ jobs: - name: Run tests uses: actions-rs/cargo@v1 with: - command: test + command: test "asynk::async_queue::postgres" args: --verbose --features asynk-postgres - name: Run dirty tests @@ -124,7 +124,7 @@ jobs: - name: Run tests uses: actions-rs/cargo@v1 with: - command: test + command: test "asynk::async_queue::sqlite" args: --verbose --features asynk-sqlite - name: Run dirty tests From c35cc3be9c9a6095a377b62937d45bc409c904bc Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 21:51:26 +0200 Subject: [PATCH 39/90] fixing workflow again --- .github/workflows/rust.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 9b115f58..d994efee 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -81,14 +81,14 @@ jobs: - name: Run tests uses: actions-rs/cargo@v1 with: - command: test "asynk::async_queue::postgres" - args: --verbose --features asynk-postgres + command: test + args: "asynk::async_queue::postgres" --verbose --features asynk-postgres - name: Run dirty tests uses: actions-rs/cargo@v1 with: - command: test "asynk::async_queue::postgres" - args: --verbose features asynk-postgres -- --ignored + command: test + args: "asynk::async_queue::postgres" --verbose features asynk-postgres -- --ignored test_sqlite: name: 
Test sqlite @@ -124,14 +124,14 @@ jobs: - name: Run tests uses: actions-rs/cargo@v1 with: - command: test "asynk::async_queue::sqlite" - args: --verbose --features asynk-sqlite + command: test + args: "asynk::async_queue::sqlite" --verbose --features asynk-sqlite - name: Run dirty tests uses: actions-rs/cargo@v1 with: - command: test "asynk::async_queue::sqlite" - args: --verbose --features asynk-sqlite -- --ignored + command: test + args: "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --ignored release: name: Release x86_64-unknown-linux-gnu From 218dd3b285929b2ddfd9a24acb8d67129ca3ef0e Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 22:41:33 +0200 Subject: [PATCH 40/90] i think i finally fixed --- .github/workflows/rust.yml | 91 +++++++------------ fang/Cargo.toml | 4 +- .../asynk/async_queue/async_queue_tests.rs | 2 +- 3 files changed, 37 insertions(+), 60 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d994efee..c7564192 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -13,23 +13,22 @@ jobs: clippy: name: Clippy runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: + - stable steps: - uses: actions/checkout@v3 + - name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - components: clippy - override: true - profile: minimal - toolchain: stable + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + - name: Run clippy - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --verbose --all-targets --all-features -- -D warnings + run: clippy --verbose --all-targets --all-features -- -D warnings test_postgres: name: Test postgres @@ -37,6 +36,11 @@ jobs: env: DATABASE_URL: postgres://postgres:postgres@localhost/fang + strategy: + matrix: + toolchain: + - stable + services: # Label used to access the service container postgres: @@ -61,49 +65,35 @@ jobs: - uses: actions/checkout@v3 - 
name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - components: clippy - override: true - profile: minimal - toolchain: stable + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Install diesel-cli - uses: actions-rs/cargo@v1 - with: - command: install - args: diesel_cli --no-default-features --features postgres + run: cargo install diesel_cli --no-default-features --features postgres - name: Setup Postgres db working-directory: ./fang/postgres_migrations run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" - name: Run tests - uses: actions-rs/cargo@v1 - with: - command: test - args: "asynk::async_queue::postgres" --verbose --features asynk-postgres + run: cargo test "asynk::async_queue::postgres" -- --verbose --features asynk-postgres --color always --nocapture - name: Run dirty tests - uses: actions-rs/cargo@v1 - with: - command: test - args: "asynk::async_queue::postgres" --verbose features asynk-postgres -- --ignored + run: cargo test "asynk::async_queue::postgres" --verbose features asynk-postgres -- --ignored test_sqlite: name: Test sqlite runs-on: ubuntu-latest + strategy: + matrix: + toolchain: + - stable + steps: - uses: actions/checkout@v3 - name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - components: clippy - override: true - profile: minimal - toolchain: stable + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Install sqlite3 run: | @@ -112,44 +102,31 @@ jobs: mkdir tests_sqlite - name: Install diesel-cli - uses: actions-rs/cargo@v1 - with: - command: install - args: diesel_cli --no-default-features --features sqlite + run: cargo install diesel_cli --no-default-features --features sqlite - name: Setup Sqlite db working-directory: ./fang/sqlite_migrations run: diesel setup --database-url "sqlite3://../../../fang.db" - name: Run tests - uses: actions-rs/cargo@v1 - with: - command: test - args: "asynk::async_queue::sqlite" 
--verbose --features asynk-sqlite + run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite - name: Run dirty tests - uses: actions-rs/cargo@v1 - with: - command: test - args: "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --ignored + run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --ignored release: name: Release x86_64-unknown-linux-gnu runs-on: ubuntu-latest + strategy: + matrix: + toolchain: + - stable + steps: - uses: actions/checkout@v3 - - name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - override: true - profile: minimal - target: x86_64-unknown-linux-gnu - toolchain: stable + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Build release - uses: actions-rs/cargo@v1 - with: - command: build - args: --release --verbose --all-features --target x86_64-unknown-linux-gnu + run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 7c9f01f0..3fa44304 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -36,7 +36,7 @@ migrations = ["dep:diesel_migrations"] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio", "postgres", "sqlite", "mysql"]} -console-subscriber = "0.2.0" +# console-subscriber = "0.2.0" [dependencies] cron = "0.12" @@ -73,7 +73,7 @@ optional = true [dependencies.tokio] version = "1.25" -features = ["rt", "time", "macros", "tracing"] +features = ["rt", "time", "macros"]#, "tracing"] optional = true [dependencies.async-trait] diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index ced658db..a9ec1028 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ 
b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -280,7 +280,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_tasks_by_metadata() { - console_subscriber::init(); + //console_subscriber::init(); let mut test: $q = $e.await; let task = test From f09c8e6103c3e0f7e438b3178d0bfa8419c775ac Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 22:48:42 +0200 Subject: [PATCH 41/90] fixed \! --- .github/workflows/rust.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index c7564192..f6bc8c15 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -13,7 +13,9 @@ jobs: clippy: name: Clippy runs-on: ubuntu-latest - + env: + CARGO_TERM_COLOR: always + strategy: matrix: toolchain: @@ -28,13 +30,14 @@ jobs: - name: Run clippy - run: clippy --verbose --all-targets --all-features -- -D warnings + run: cargo clippy --verbose --all-targets --all-features -- -D warnings test_postgres: name: Test postgres runs-on: ubuntu-latest env: DATABASE_URL: postgres://postgres:postgres@localhost/fang + CARGO_TERM_COLOR: always strategy: matrix: @@ -75,10 +78,10 @@ jobs: run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" - name: Run tests - run: cargo test "asynk::async_queue::postgres" -- --verbose --features asynk-postgres --color always --nocapture + run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres --color always --nocapture - name: Run dirty tests - run: cargo test "asynk::async_queue::postgres" --verbose features asynk-postgres -- --ignored + run: cargo test "asynk::async_queue::postgres" --verbose features asynk-postgres --ignored test_sqlite: name: Test sqlite @@ -117,6 +120,8 @@ jobs: release: name: Release x86_64-unknown-linux-gnu runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always strategy: matrix: From 8dd0c892f80f34f3ba942c1ce0f997b114b2d2ce Mon 
Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 22:56:37 +0200 Subject: [PATCH 42/90] fixed \? --- .github/workflows/rust.yml | 6 +++--- .github/workflows/style.yml | 18 ++++++++---------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index f6bc8c15..ab062756 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -78,10 +78,10 @@ jobs: run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" - name: Run tests - run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres --color always --nocapture + run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres --color always -- --nocapture - name: Run dirty tests - run: cargo test "asynk::async_queue::postgres" --verbose features asynk-postgres --ignored + run: cargo test "asynk::async_queue::postgres" --verbose features asynk-postgres -- --ignored test_sqlite: name: Test sqlite @@ -112,7 +112,7 @@ jobs: run: diesel setup --database-url "sqlite3://../../../fang.db" - name: Run tests - run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite + run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --nocapture - name: Run dirty tests run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --ignored diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index d7e207c6..82f65ec8 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -9,19 +9,17 @@ jobs: rustfmt: name: Rustfmt runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + strategy: + matrix: + toolchain: + - stable steps: - uses: actions/checkout@v3 - name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - profile: minimal - components: rustfmt + run: rustup update ${{ matrix.toolchain }} && rustup 
default ${{ matrix.toolchain }} - name: Check format - uses: actions-rs/cargo@v1 - with: - command: fmt - args: -- --check --verbose + run: cargo fmt -- --check --verbose From c17740ba355eae118c2af8331d756277bcdd7e2e Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 23:01:29 +0200 Subject: [PATCH 43/90] i am stupid now should be fixed :) --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ab062756..1b4f07f3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -81,7 +81,7 @@ jobs: run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres --color always -- --nocapture - name: Run dirty tests - run: cargo test "asynk::async_queue::postgres" --verbose features asynk-postgres -- --ignored + run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres -- --ignored test_sqlite: name: Test sqlite From d6f95fcec602912124cd927db6d51534806491e1 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 23:45:00 +0200 Subject: [PATCH 44/90] test blocking and fang derive error --- .github/workflows/rust.yml | 49 ++++++++++++++++++++++++++++ fang-derive-error/example/src/lib.rs | 2 +- 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1b4f07f3..cee00219 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -31,7 +31,56 @@ jobs: - name: Run clippy run: cargo clippy --verbose --all-targets --all-features -- -D warnings + + test_postgres_blocking: + name: Test blocking + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + - stable + + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + - 
name: Install diesel-cli + run: cargo install diesel_cli --no-default-features --features postgres + + - name: Setup Postgres db + working-directory: ./fang/postgres_migrations + run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" + + - name: Run blocking tests + run: cargo test "blocking::queue::postgres" --verbose --features blocking --color always -- --nocapture + + - name: Run blocking dirty tests + run: cargo test "blocking::queue::postgres" --verbose --features blocking -- --ignored + + test_fang_derive_error: + name: Test fang_derive_error + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + - stable + + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + - name: Run fang derive error tests + run: cargo test "fang_derive_error" --verbose --color always -- --nocapture test_postgres: name: Test postgres runs-on: ubuntu-latest diff --git a/fang-derive-error/example/src/lib.rs b/fang-derive-error/example/src/lib.rs index 86262e6f..b4d6c781 100644 --- a/fang-derive-error/example/src/lib.rs +++ b/fang-derive-error/example/src/lib.rs @@ -7,7 +7,7 @@ pub enum MyAwesomeError { MyVariantErrorTwo(u32), } #[cfg(test)] -mod tests { +mod fang_derive_error_tests { use crate::MyAwesomeError; use fang::FangError; From 847a28122b42a6e71c1a2021110b5da6111fa0ff Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 8 Apr 2024 23:48:08 +0200 Subject: [PATCH 45/90] add postgres service to blocking workflow --- .github/workflows/rust.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index cee00219..36103efd 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -38,6 +38,26 @@ jobs: env: CARGO_TERM_COLOR: always + services: + # Label used to access the service container + postgres: 
+ # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + # Set health checks to wait until postgres has started + + ports: + - 5432:5432 + + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + strategy: matrix: toolchain: From 8230d9a649778ba3258fb18ae0836ab7498130c3 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 9 Apr 2024 00:01:48 +0200 Subject: [PATCH 46/90] running async worker tests and blocking worker tests --- .github/workflows/rust.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 36103efd..d396251e 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -80,7 +80,7 @@ jobs: run: cargo test "blocking::queue::postgres" --verbose --features blocking --color always -- --nocapture - name: Run blocking dirty tests - run: cargo test "blocking::queue::postgres" --verbose --features blocking -- --ignored + run: cargo test "blocking::worker" --verbose --features blocking -- --ignored test_fang_derive_error: name: Test fang_derive_error @@ -101,6 +101,7 @@ jobs: - name: Run fang derive error tests run: cargo test "fang_derive_error" --verbose --color always -- --nocapture + test_postgres: name: Test postgres runs-on: ubuntu-latest @@ -149,8 +150,8 @@ jobs: - name: Run tests run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres --color always -- --nocapture - - name: Run dirty tests - run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres -- --ignored + - name: Run worker tests + run: cargo test "asynk::async_worker::async_worker_tests" --verbose --features asynk-postgres --color always -- --nocapture test_sqlite: name: Test sqlite @@ -183,9 +184,6 @@ jobs: - name: Run tests run: cargo test "asynk::async_queue::sqlite" 
--verbose --features asynk-sqlite -- --nocapture - - name: Run dirty tests - run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --ignored - release: name: Release x86_64-unknown-linux-gnu runs-on: ubuntu-latest From 0a230099bf76957daa2d446d7e38d4bd5bd80469 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 9 Apr 2024 00:08:51 +0200 Subject: [PATCH 47/90] cargo term color sqlite --- .github/workflows/rust.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d396251e..e79d7c18 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -156,6 +156,8 @@ jobs: test_sqlite: name: Test sqlite runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always strategy: matrix: From cc792ab7e8e3a13f202a41b8222accb3db0db463 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 9 Apr 2024 18:19:42 +0200 Subject: [PATCH 48/90] change sqlx version to 0.6.3 and using Pool instead of Transactions --- .github/workflows/rust.yml | 57 +++++++ fang/Cargo.toml | 8 +- fang/src/asynk/async_queue.rs | 130 ++++++---------- fang/src/asynk/backend_sqlx.rs | 274 +++++++++++++-------------------- 4 files changed, 220 insertions(+), 249 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index e79d7c18..adf345ca 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -204,3 +204,60 @@ jobs: - name: Build release run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu + +test_mysql: + name: Test mysql + runs-on: ubuntu-latest + env: + DATABASE_URL: mysql://root:mysql@localhost/fang + CARGO_TERM_COLOR: always + + services: + mysql: + image: mysql:8.1 + env: + MYSQL_ROOT_PASSWORD: mysql + MYSQL_DATABASE: fang + ports: + - 3306:3306 + options: --health-cmd "mysqladmin ping" --connect-timeout 5s --wait=5 + + strategy: + matrix: + toolchain: + - 
stable + + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + - name: Install diesel-cli + run: cargo install diesel_cli --no-default-features --features mysql + + - name: Setup Sqlite db + working-directory: ./fang/mysql_migrations + run: diesel setup --database-url "mysql://root:mysql@localhost/fang" + + - name: Run tests + run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql -- --nocapture + + release: + name: Release x86_64-unknown-linux-gnu + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + - stable + + steps: + - uses: actions/checkout@v3 + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + - name: Build release + run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 3fa44304..7728f8d5 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -35,8 +35,9 @@ migrations = ["dep:diesel_migrations"] [dev-dependencies] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} -sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio", "postgres", "sqlite", "mysql"]} -# console-subscriber = "0.2.0" +sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} +#sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio", "postgres", "sqlite", "mysql"]} +#console-subscriber = "0.2.0" [dependencies] cron = "0.12" @@ -55,7 +56,8 @@ fang-derive-error = { version = "0.1.0" , optional = true} # sqlx with no TLS, if you want TLS you must to get feature "tls-native-tls" or "tls-rustls" #sqlx = {version = "0.7", features = ["any" , "macros" , "json" 
, "uuid" , "chrono" , "runtime-tokio", "postgres", "sqlite", "mysql"] } # https://github.com/launchbadge/sqlx/issues/2416 is fixed in pxp9's fork -sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio"] , optional = true} +#sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio"] , optional = true} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls", "postgres", "sqlite", "mysql"], optional = true} [dependencies.diesel] version = "2.1" diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 2e0c8aa4..08094136 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -13,11 +13,11 @@ use async_trait::async_trait; use chrono::DateTime; use chrono::Utc; use cron::Schedule; -use sqlx::any::install_default_drivers; +//use sqlx::any::install_default_drivers; use sqlx::pool::PoolOptions; use sqlx::Any; use sqlx::AnyPool; -use sqlx::Transaction; +use sqlx::Pool; use std::str::FromStr; use thiserror::Error; use typed_builder::TypedBuilder; @@ -315,20 +315,23 @@ impl AsyncQueue { /// Connect to the db if not connected pub async fn connect(&mut self) -> Result<(), AsyncQueueError> { - install_default_drivers(); + //install_default_drivers(); let pool: AnyPool = PoolOptions::new() .max_connections(self.max_pool_size) .connect(&self.uri) .await?; - let conn = pool.acquire().await?; + let anykind = pool.any_kind(); - let backend = conn.backend_name().to_string(); - - drop(conn); + let backend = match anykind { + sqlx::any::AnyKind::Postgres => BackendSqlX::Pg, + sqlx::any::AnyKind::Sqlite => BackendSqlX::Sqlite, + sqlx::any::AnyKind::MySql => BackendSqlX::Mysql, + _ => BackendSqlX::NoBackend, + }; - self.backend = BackendSqlX::new_with_name(&backend); + self.backend = backend; self.pool = Some(pool); self.connected = true; @@ -336,7 +339,7 @@ impl AsyncQueue { } async fn 
fetch_and_touch_task_query( - transaction: &mut Transaction<'_, Any>, + pool: &Pool, backend: &BackendSqlX, task_type: Option, ) -> Result, AsyncQueueError> { @@ -348,7 +351,7 @@ impl AsyncQueue { let query_params = QueryParams::builder().task_type(&task_type).build(); let task = backend - .execute_query(SqlXQuery::FetchTaskType, transaction, query_params) + .execute_query(SqlXQuery::FetchTaskType, pool, query_params) .await .map(|val| val.unwrap_task()) .ok(); @@ -360,7 +363,7 @@ impl AsyncQueue { .build(); let task = backend - .execute_query(SqlXQuery::UpdateTaskState, transaction, query_params) + .execute_query(SqlXQuery::UpdateTaskState, pool, query_params) .await? .unwrap_task(); @@ -372,7 +375,7 @@ impl AsyncQueue { } async fn insert_task_query( - transaction: &mut Transaction<'_, Any>, + pool: &Pool, backend: &BackendSqlX, metadata: &serde_json::Value, task_type: &str, @@ -385,7 +388,7 @@ impl AsyncQueue { .build(); let task = backend - .execute_query(SqlXQuery::InsertTask, transaction, query_params) + .execute_query(SqlXQuery::InsertTask, pool, query_params) .await? .unwrap_task(); @@ -393,7 +396,7 @@ impl AsyncQueue { } async fn insert_task_if_not_exist_query( - transaction: &mut Transaction<'_, Any>, + pool: &Pool, backend: &BackendSqlX, metadata: &serde_json::Value, task_type: &str, @@ -406,7 +409,7 @@ impl AsyncQueue { .build(); let task = backend - .execute_query(SqlXQuery::InsertTaskIfNotExists, transaction, query_params) + .execute_query(SqlXQuery::InsertTaskIfNotExists, pool, query_params) .await? .unwrap_task(); @@ -414,7 +417,7 @@ impl AsyncQueue { } async fn schedule_task_query( - transaction: &mut Transaction<'_, Any>, + pool: &Pool, backend: &BackendSqlX, task: &dyn AsyncRunnable, ) -> Result { @@ -439,17 +442,11 @@ impl AsyncQueue { }; let task: Task = if !task.uniq() { - Self::insert_task_query( - transaction, - backend, - &metadata, - &task.task_type(), - &scheduled_at, - ) - .await? 
+ Self::insert_task_query(pool, backend, &metadata, &task.task_type(), &scheduled_at) + .await? } else { Self::insert_task_if_not_exist_query( - transaction, + pool, backend, &metadata, &task.task_type(), @@ -465,18 +462,16 @@ impl AsyncQueue { impl AsyncQueueable for AsyncQueue { async fn find_task_by_id(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().uuid(id).build(); let task = self .backend - .execute_query(SqlXQuery::FindTaskById, &mut transaction, query_params) + .execute_query(SqlXQuery::FindTaskById, pool, query_params) .await? .unwrap_task(); - transaction.commit().await?; - Ok(task) } @@ -485,24 +480,21 @@ impl AsyncQueueable for AsyncQueue { task_type: Option, ) -> Result, AsyncQueueError> { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; - - let task = - Self::fetch_and_touch_task_query(&mut transaction, &self.backend, task_type).await?; + let pool = self.pool.as_ref().unwrap(); - transaction.commit().await?; + let task = Self::fetch_and_touch_task_query(pool, &self.backend, task_type).await?; Ok(task) } async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let metadata = serde_json::to_value(task)?; let task = if !task.uniq() { Self::insert_task_query( - &mut transaction, + pool, &self.backend, &metadata, &task.task_type(), @@ -511,7 +503,7 @@ impl AsyncQueueable for AsyncQueue { .await? } else { Self::insert_task_if_not_exist_query( - &mut transaction, + pool, &self.backend, &metadata, &task.task_type(), @@ -520,74 +512,60 @@ impl AsyncQueueable for AsyncQueue { .await? 
}; - transaction.commit().await?; - Ok(task) } async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; - - let task = Self::schedule_task_query(&mut transaction, &self.backend, task).await?; + let pool = self.pool.as_ref().unwrap(); - transaction.commit().await?; + let task = Self::schedule_task_query(pool, &self.backend, task).await?; Ok(task) } async fn remove_all_tasks(&mut self) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().build(); let result = self .backend - .execute_query(SqlXQuery::RemoveAllTask, &mut transaction, query_params) + .execute_query(SqlXQuery::RemoveAllTask, pool, query_params) .await? .unwrap_u64(); - transaction.commit().await?; - Ok(result) } async fn remove_all_scheduled_tasks(&mut self) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().build(); let result = self .backend - .execute_query( - SqlXQuery::RemoveAllScheduledTask, - &mut transaction, - query_params, - ) + .execute_query(SqlXQuery::RemoveAllScheduledTask, pool, query_params) .await? .unwrap_u64(); - transaction.commit().await?; - Ok(result) } async fn remove_task(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().uuid(id).build(); let result = self .backend - .execute_query(SqlXQuery::RemoveTask, &mut transaction, query_params) + .execute_query(SqlXQuery::RemoveTask, pool, query_params) .await? 
.unwrap_u64(); - transaction.commit().await?; - Ok(result) } @@ -597,22 +575,16 @@ impl AsyncQueueable for AsyncQueue { ) -> Result { if task.uniq() { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().runnable(task).build(); let result = self .backend - .execute_query( - SqlXQuery::RemoveTaskByMetadata, - &mut transaction, - query_params, - ) + .execute_query(SqlXQuery::RemoveTaskByMetadata, pool, query_params) .await? .unwrap_u64(); - transaction.commit().await?; - Ok(result) } else { Err(AsyncQueueError::TaskNotUniqError) @@ -621,18 +593,16 @@ impl AsyncQueueable for AsyncQueue { async fn remove_tasks_type(&mut self, task_type: &str) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().task_type(task_type).build(); let result = self .backend - .execute_query(SqlXQuery::RemoveTaskType, &mut transaction, query_params) + .execute_query(SqlXQuery::RemoveTaskType, pool, query_params) .await? .unwrap_u64(); - transaction.commit().await?; - Ok(result) } @@ -642,18 +612,16 @@ impl AsyncQueueable for AsyncQueue { state: FangTaskState, ) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().uuid(&task.id).state(state).build(); let task = self .backend - .execute_query(SqlXQuery::UpdateTaskState, &mut transaction, query_params) + .execute_query(SqlXQuery::UpdateTaskState, pool, query_params) .await? 
.unwrap_task(); - transaction.commit().await?; - Ok(task) } @@ -663,7 +631,7 @@ impl AsyncQueueable for AsyncQueue { error_message: &str, ) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder() .error_message(error_message) @@ -672,12 +640,10 @@ impl AsyncQueueable for AsyncQueue { let failed_task = self .backend - .execute_query(SqlXQuery::FailTask, &mut transaction, query_params) + .execute_query(SqlXQuery::FailTask, pool, query_params) .await? .unwrap_task(); - transaction.commit().await?; - Ok(failed_task) } @@ -689,7 +655,7 @@ impl AsyncQueueable for AsyncQueue { ) -> Result { self.check_if_connection()?; - let mut transaction = self.pool.as_ref().unwrap().begin().await?; + let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder() .backoff_seconds(backoff_seconds) @@ -699,12 +665,10 @@ impl AsyncQueueable for AsyncQueue { let failed_task = self .backend - .execute_query(SqlXQuery::RetryTask, &mut transaction, query_params) + .execute_query(SqlXQuery::RetryTask, pool, query_params) .await? 
.unwrap_task(); - transaction.commit().await?; - Ok(failed_task) } } diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 310c7f0e..9f5a3b78 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -3,7 +3,8 @@ use chrono::Duration; use chrono::Utc; use sha2::Digest; use sha2::Sha256; -use sqlx::{Acquire, Any, Transaction}; +use sqlx::Any; +use sqlx::Pool; use std::fmt::Debug; use typed_builder::TypedBuilder; use uuid::Uuid; @@ -125,15 +126,13 @@ impl BackendSqlX { pub(crate) async fn execute_query<'a>( &self, query: SqlXQuery, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match self { - BackendSqlX::Pg => BackendSqlXPg::execute_query(query, transaction, params).await, - BackendSqlX::Sqlite => { - BackendSqlXSQLite::execute_query(query, transaction, params).await - } - BackendSqlX::Mysql => BackendSqlXMySQL::execute_query(query, transaction, params).await, + BackendSqlX::Pg => BackendSqlXPg::execute_query(query, pool, params).await, + BackendSqlX::Sqlite => BackendSqlXSQLite::execute_query(query, pool, params).await, + BackendSqlX::Mysql => BackendSqlXMySQL::execute_query(query, pool, params).await, _ => unreachable!(), } } @@ -176,21 +175,20 @@ use crate::{AsyncQueueError, Task}; impl BackendSqlXPg { async fn execute_query( query: SqlXQuery, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match query { Q::InsertTask => { let task = - general_any_impl_insert_task(INSERT_TASK_QUERY_POSTGRES, transaction, params) - .await?; + general_any_impl_insert_task(INSERT_TASK_QUERY_POSTGRES, pool, params).await?; Ok(Res::Task(task)) } Q::UpdateTaskState => { let task = general_any_impl_update_task_state( UPDATE_TASK_STATE_QUERY_POSTGRES, - transaction, + pool, params, ) .await?; @@ -198,22 +196,20 @@ impl BackendSqlXPg { } Q::FailTask => { let task = - general_any_impl_fail_task(FAIL_TASK_QUERY_POSTGRES, transaction, params) 
- .await?; + general_any_impl_fail_task(FAIL_TASK_QUERY_POSTGRES, pool, params).await?; Ok(Res::Task(task)) } Q::RemoveAllTask => { let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, transaction) - .await?; + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, pool).await?; Ok(Res::Bigint(affected_rows)) } Q::RemoveAllScheduledTask => { let affected_rows = general_any_impl_remove_all_scheduled_tasks( REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, - transaction, + pool, ) .await?; @@ -221,15 +217,14 @@ impl BackendSqlXPg { } Q::RemoveTask => { let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, transaction, params) - .await?; + general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, pool, params).await?; Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskByMetadata => { let affected_rows = general_any_impl_remove_task_by_metadata( REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, - transaction, + pool, params, ) .await?; @@ -239,7 +234,7 @@ impl BackendSqlXPg { Q::RemoveTaskType => { let affected_rows = general_any_impl_remove_task_type( REMOVE_TASKS_TYPE_QUERY_POSTGRES, - transaction, + pool, params, ) .await?; @@ -247,27 +242,20 @@ impl BackendSqlXPg { Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { - let task = general_any_impl_fetch_task_type( - FETCH_TASK_TYPE_QUERY_POSTGRES, - transaction, - params, - ) - .await?; + let task = + general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_POSTGRES, pool, params) + .await?; Ok(Res::Task(task)) } Q::FindTaskById => { - let task = general_any_impl_find_task_by_id( - FIND_TASK_BY_ID_QUERY_POSTGRES, - transaction, - params, - ) - .await?; + let task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_POSTGRES, pool, params) + .await?; Ok(Res::Task(task)) } Q::RetryTask => { let task = - general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, transaction, params) - .await?; + general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, pool, params).await?; 
Ok(Res::Task(task)) } @@ -277,7 +265,7 @@ impl BackendSqlXPg { FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, INSERT_TASK_UNIQ_QUERY_POSTGRES, ), - transaction, + pool, params, ) .await?; @@ -298,43 +286,40 @@ struct BackendSqlXSQLite {} impl BackendSqlXSQLite { async fn execute_query( query: SqlXQuery, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match query { Q::InsertTask => { let task = - general_any_impl_insert_task(INSERT_TASK_QUERY_SQLITE, transaction, params) - .await?; + general_any_impl_insert_task(INSERT_TASK_QUERY_SQLITE, pool, params).await?; Ok(Res::Task(task)) } Q::UpdateTaskState => { let task = general_any_impl_update_task_state( UPDATE_TASK_STATE_QUERY_SQLITE, - transaction, + pool, params, ) .await?; Ok(Res::Task(task)) } Q::FailTask => { - let task = - general_any_impl_fail_task(FAIL_TASK_QUERY_SQLITE, transaction, params).await?; + let task = general_any_impl_fail_task(FAIL_TASK_QUERY_SQLITE, pool, params).await?; Ok(Res::Task(task)) } Q::RemoveAllTask => { let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, transaction) - .await?; + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, pool).await?; Ok(Res::Bigint(affected_rows)) } Q::RemoveAllScheduledTask => { let affected_rows = general_any_impl_remove_all_scheduled_tasks( REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, - transaction, + pool, ) .await?; @@ -342,15 +327,14 @@ impl BackendSqlXSQLite { } Q::RemoveTask => { let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, transaction, params) - .await?; + general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, pool, params).await?; Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskByMetadata => { let affected_rows = general_any_impl_remove_task_by_metadata( REMOVE_TASK_BY_METADATA_QUERY_SQLITE, - transaction, + pool, params, ) .await?; @@ -358,37 +342,27 @@ impl BackendSqlXSQLite { Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskType => { - let 
affected_rows = general_any_impl_remove_task_type( - REMOVE_TASKS_TYPE_QUERY_SQLITE, - transaction, - params, - ) - .await?; + let affected_rows = + general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_SQLITE, pool, params) + .await?; Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { - let task = general_any_impl_fetch_task_type( - FETCH_TASK_TYPE_QUERY_SQLITE, - transaction, - params, - ) - .await?; + let task = + general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_SQLITE, pool, params) + .await?; Ok(Res::Task(task)) } Q::FindTaskById => { - let task = general_any_impl_find_task_by_id( - FIND_TASK_BY_ID_QUERY_SQLITE, - transaction, - params, - ) - .await?; + let task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_SQLITE, pool, params) + .await?; Ok(Res::Task(task)) } Q::RetryTask => { let task = - general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, transaction, params) - .await?; + general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, pool, params).await?; Ok(Res::Task(task)) } @@ -398,7 +372,7 @@ impl BackendSqlXSQLite { FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, INSERT_TASK_UNIQ_QUERY_SQLITE, ), - transaction, + pool, params, ) .await?; @@ -415,18 +389,18 @@ impl BackendSqlXSQLite { async fn general_any_impl_insert_task_if_not_exists( queries: (&str, &str), - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { - match general_any_impl_find_task_by_uniq_hash(queries.0, transaction, ¶ms).await { + match general_any_impl_find_task_by_uniq_hash(queries.0, pool, ¶ms).await { Some(task) => Ok(task), - None => general_any_impl_insert_task_uniq(queries.1, transaction, params).await, + None => general_any_impl_insert_task_uniq(queries.1, pool, params).await, } } async fn general_any_impl_insert_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); @@ -443,7 +417,7 @@ async fn general_any_impl_insert_task( .bind(metadata_str) 
.bind(task_type) .bind(scheduled_at_str) - .fetch_one(transaction.acquire().await?) + .fetch_one(pool) .await?; Ok(task) @@ -458,7 +432,7 @@ pub(crate) fn calculate_hash(json: &str) -> String { async fn general_any_impl_insert_task_uniq( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); @@ -480,14 +454,14 @@ async fn general_any_impl_insert_task_uniq( .bind(task_type) .bind(uniq_hash) .bind(scheduled_at_str) - .fetch_one(transaction.acquire().await?) + .fetch_one(pool) .await?; Ok(task) } async fn general_any_impl_update_task_state( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -503,7 +477,7 @@ async fn general_any_impl_update_task_state( .bind(state_str) .bind(updated_at_str) .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) + .fetch_one(pool) .await?; Ok(task) @@ -511,7 +485,7 @@ async fn general_any_impl_update_task_state( async fn general_any_impl_fail_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -528,7 +502,7 @@ async fn general_any_impl_fail_task( .bind(error_message) .bind(updated_at) .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) + .fetch_one(pool) .await?; Ok(failed_task) @@ -536,30 +510,27 @@ async fn general_any_impl_fail_task( async fn general_any_impl_remove_all_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, ) -> Result { - Ok(sqlx::query(query) - .execute(transaction.acquire().await?) - .await? 
- .rows_affected()) + Ok(sqlx::query(query).execute(pool).await?.rows_affected()) } async fn general_any_impl_remove_all_scheduled_tasks( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, ) -> Result { let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); Ok(sqlx::query(query) .bind(now_str) - .execute(transaction.acquire().await?) + .execute(pool) .await? .rows_affected()) } async fn general_any_impl_remove_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let mut buffer = Uuid::encode_buffer(); @@ -571,7 +542,7 @@ async fn general_any_impl_remove_task( let result = sqlx::query(query) .bind(&*uuid_as_text) - .execute(transaction.acquire().await?) + .execute(pool) .await? .rows_affected(); @@ -587,7 +558,7 @@ async fn general_any_impl_remove_task( async fn general_any_impl_remove_task_by_metadata( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let metadata = serde_json::to_value(params.runnable.unwrap())?; @@ -596,7 +567,7 @@ async fn general_any_impl_remove_task_by_metadata( println!("{query}"); - let adquire = transaction.acquire().await?; + let adquire = pool; println!("Adquire {:?}", adquire); @@ -609,21 +580,21 @@ async fn general_any_impl_remove_task_by_metadata( async fn general_any_impl_remove_task_type( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let task_type = params.task_type.unwrap(); Ok(sqlx::query(query) .bind(task_type) - .execute(transaction.acquire().await?) + .execute(pool) .await? 
.rows_affected()) } async fn general_any_impl_fetch_task_type( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let task_type = params.task_type.unwrap(); @@ -633,7 +604,7 @@ async fn general_any_impl_fetch_task_type( let task: Task = sqlx::query_as(query) .bind(task_type) .bind(now_str) - .fetch_one(transaction.acquire().await?) + .fetch_one(pool) .await?; Ok(task) @@ -641,7 +612,7 @@ async fn general_any_impl_fetch_task_type( async fn general_any_impl_find_task_by_uniq_hash( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: &QueryParams<'_>, ) -> Option { let metadata = params.metadata.unwrap(); @@ -650,14 +621,14 @@ async fn general_any_impl_find_task_by_uniq_hash( sqlx::query_as(query) .bind(uniq_hash) - .fetch_one(transaction.acquire().await.ok()?) + .fetch_one(pool) .await .ok() } async fn general_any_impl_find_task_by_id( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let mut buffer = Uuid::encode_buffer(); @@ -669,7 +640,7 @@ async fn general_any_impl_find_task_by_id( let task: Task = sqlx::query_as(query) .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) + .fetch_one(pool) .await?; Ok(task) @@ -677,7 +648,7 @@ async fn general_any_impl_find_task_by_id( async fn general_any_impl_retry_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let now = Utc::now(); @@ -703,7 +674,7 @@ async fn general_any_impl_retry_task( .bind(scheduled_at_str) .bind(now_str) .bind(&*uuid_as_text) - .fetch_one(transaction.acquire().await?) 
+ .fetch_one(pool) .await?; Ok(failed_task) @@ -715,36 +686,31 @@ struct BackendSqlXMySQL {} impl BackendSqlXMySQL { async fn execute_query( query: SqlXQuery, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match query { Q::InsertTask => { - let task = - mysql_impl_insert_task(INSERT_TASK_QUERY_MYSQL, transaction, params).await?; + let task = mysql_impl_insert_task(INSERT_TASK_QUERY_MYSQL, pool, params).await?; Ok(Res::Task(task)) } Q::UpdateTaskState => { - let task = mysql_impl_update_task_state( - UPDATE_TASK_STATE_QUERY_MYSQL, - transaction, - params, - ) - .await?; + let task = + mysql_impl_update_task_state(UPDATE_TASK_STATE_QUERY_MYSQL, pool, params) + .await?; Ok(Res::Task(task)) } Q::FailTask => { - let task = mysql_impl_fail_task(FAIL_TASK_QUERY_MYSQL, transaction, params).await?; + let task = mysql_impl_fail_task(FAIL_TASK_QUERY_MYSQL, pool, params).await?; Ok(Res::Task(task)) } Q::RemoveAllTask => { let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, transaction) - .await?; + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, pool).await?; Ok(Res::Bigint(affected_rows)) } @@ -752,7 +718,7 @@ impl BackendSqlXMySQL { Q::RemoveAllScheduledTask => { let affected_rows = general_any_impl_remove_all_scheduled_tasks( REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, - transaction, + pool, ) .await?; @@ -761,15 +727,14 @@ impl BackendSqlXMySQL { Q::RemoveTask => { let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, transaction, params) - .await?; + general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, pool, params).await?; Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskByMetadata => { let affected_rows = general_any_impl_remove_task_by_metadata( REMOVE_TASK_BY_METADATA_QUERY_MYSQL, - transaction, + pool, params, ) .await?; @@ -777,37 +742,27 @@ impl BackendSqlXMySQL { Ok(Res::Bigint(affected_rows)) } Q::RemoveTaskType => { - let affected_rows = 
general_any_impl_remove_task_type( - REMOVE_TASKS_TYPE_QUERY_MYSQL, - transaction, - params, - ) - .await?; + let affected_rows = + general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_MYSQL, pool, params) + .await?; Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { - let task = general_any_impl_fetch_task_type( - FETCH_TASK_TYPE_QUERY_MYSQL, - transaction, - params, - ) - .await?; + let task = + general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_MYSQL, pool, params) + .await?; Ok(Res::Task(task)) } Q::FindTaskById => { - let task: Task = general_any_impl_find_task_by_id( - FIND_TASK_BY_ID_QUERY_MYSQL, - transaction, - params, - ) - .await?; + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, params) + .await?; Ok(Res::Task(task)) } Q::RetryTask => { - let task = - mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, transaction, params).await?; + let task = mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, pool, params).await?; Ok(Res::Task(task)) } @@ -817,7 +772,7 @@ impl BackendSqlXMySQL { FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, INSERT_TASK_UNIQ_QUERY_MYSQL, ), - transaction, + pool, params, ) .await?; @@ -834,7 +789,7 @@ impl BackendSqlXMySQL { async fn mysql_impl_insert_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); @@ -851,7 +806,7 @@ async fn mysql_impl_insert_task( .bind(metadata_str) .bind(task_type) .bind(scheduled_at_str) - .execute(transaction.acquire().await?) + .execute(pool) .await? 
.rows_affected(); @@ -863,15 +818,14 @@ async fn mysql_impl_insert_task( let query_params = QueryParams::builder().uuid(&uuid).build(); let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) - .await?; + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; Ok(task) } async fn mysql_impl_insert_task_uniq( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); @@ -891,18 +845,16 @@ async fn mysql_impl_insert_task_uniq( println!("reach here"); - let adquire = transaction.acquire().await?; - println!("reach here 2"); - let affected_rows = sqlx::query(query) .bind(uuid_as_str) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) .bind(scheduled_at_str) - .execute(adquire) + .execute(pool) .await? .rows_affected(); + println!("reach here 3"); if affected_rows != 1 { @@ -913,15 +865,14 @@ async fn mysql_impl_insert_task_uniq( let query_params = QueryParams::builder().uuid(&uuid).build(); let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) - .await?; + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, &pool, query_params).await?; Ok(task) } async fn mysql_impl_update_task_state( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -937,7 +888,7 @@ async fn mysql_impl_update_task_state( .bind(state_str) .bind(updated_at_str) .bind(&*uuid_as_text) - .execute(transaction.acquire().await?) + .execute(pool) .await? 
.rows_affected(); @@ -949,15 +900,14 @@ async fn mysql_impl_update_task_state( let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) - .await?; + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; Ok(task) } async fn mysql_impl_fail_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); @@ -974,7 +924,7 @@ async fn mysql_impl_fail_task( .bind(error_message) .bind(updated_at) .bind(&*uuid_as_text) - .execute(transaction.acquire().await?) + .execute(pool) .await? .rows_affected(); @@ -986,15 +936,14 @@ async fn mysql_impl_fail_task( let query_params = QueryParams::builder().uuid(&id).build(); let failed_task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) - .await?; + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; Ok(failed_task) } async fn mysql_impl_retry_task( query: &str, - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { let now = Utc::now(); @@ -1017,7 +966,7 @@ async fn mysql_impl_retry_task( .bind(scheduled_at_str) .bind(now_str) .bind(&*uuid_as_text) - .execute(transaction.acquire().await?) + .execute(pool) .await? 
.rows_affected(); @@ -1029,19 +978,18 @@ async fn mysql_impl_retry_task( let query_params = QueryParams::builder().uuid(&uuid).build(); let failed_task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, transaction, query_params) - .await?; + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; Ok(failed_task) } async fn mysql_any_impl_insert_task_if_not_exists( queries: (&str, &str), - transaction: &mut Transaction<'_, Any>, + pool: &Pool, params: QueryParams<'_>, ) -> Result { - match general_any_impl_find_task_by_uniq_hash(queries.0, transaction, ¶ms).await { + match general_any_impl_find_task_by_uniq_hash(queries.0, pool, ¶ms).await { Some(task) => Ok(task), - None => mysql_impl_insert_task_uniq(queries.1, transaction, params).await, + None => mysql_impl_insert_task_uniq(queries.1, pool, params).await, } } From 262d5d6808742c23b10fb7f8efbec9bf1ca7984a Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 9 Apr 2024 18:26:26 +0200 Subject: [PATCH 49/90] fix workflow for mysql --- .github/workflows/rust.yml | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index adf345ca..4e10f42a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -241,23 +241,4 @@ test_mysql: run: diesel setup --database-url "mysql://root:mysql@localhost/fang" - name: Run tests - run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql -- --nocapture - - release: - name: Release x86_64-unknown-linux-gnu - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - - strategy: - matrix: - toolchain: - - stable - - steps: - - uses: actions/checkout@v3 - - name: Setup Rust - run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - - - name: Build release - run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu + 
run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql -- --nocapture \ No newline at end of file From 41584c7cb932cb55a782e6ea019c80b2b3816045 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 9 Apr 2024 18:28:49 +0200 Subject: [PATCH 50/90] fix workflow --- .github/workflows/rust.yml | 40 +------------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4e10f42a..8d877fd5 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -203,42 +203,4 @@ jobs: run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Build release - run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu - -test_mysql: - name: Test mysql - runs-on: ubuntu-latest - env: - DATABASE_URL: mysql://root:mysql@localhost/fang - CARGO_TERM_COLOR: always - - services: - mysql: - image: mysql:8.1 - env: - MYSQL_ROOT_PASSWORD: mysql - MYSQL_DATABASE: fang - ports: - - 3306:3306 - options: --health-cmd "mysqladmin ping" --connect-timeout 5s --wait=5 - - strategy: - matrix: - toolchain: - - stable - - steps: - - uses: actions/checkout@v3 - - - name: Setup Rust - run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - - - name: Install diesel-cli - run: cargo install diesel_cli --no-default-features --features mysql - - - name: Setup Sqlite db - working-directory: ./fang/mysql_migrations - run: diesel setup --database-url "mysql://root:mysql@localhost/fang" - - - name: Run tests - run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql -- --nocapture \ No newline at end of file + run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu \ No newline at end of file From 4b0bdadeadba01ca8ea0a3b5e5b49530b221e13c Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> 
Date: Tue, 9 Apr 2024 23:58:51 +0200 Subject: [PATCH 51/90] fix clippy, warns and mysql workflow --- .github/workflows/rust.yml | 49 +++++++++++++++++++++++++++++++++- fang/src/asynk/async_queue.rs | 10 +++---- fang/src/asynk/backend_sqlx.rs | 4 +-- 3 files changed, 55 insertions(+), 8 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 8d877fd5..f81fae8a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -203,4 +203,51 @@ jobs: run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Build release - run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu \ No newline at end of file + run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu + + test_mysql: + name: Test mysql + runs-on: ubuntu-latest + env: + DATABASE_URL: mysql://root:mysql@localhost/fang + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + - stable + + services: + # Label used to access the service container + mysql: + # Docker Hub image + image: mysql:8.1 + # Provide the password for postgres + env: + MYSQL_ROOT_PASSWORD: mysql + MYSQL_DATABASE: fang + # Set health checks to wait until postgres has started + + ports: + - 5432:5432 + + options: >- + --health-cmd mysqladmin ping + --connection-timeout 5s + --wait=5 + + steps: + - uses: actions/checkout@v3 + + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + - name: Install diesel-cli + run: cargo install diesel_cli --no-default-features --features mysql + + - name: Setup Postgres db + working-directory: ./fang/mysql_migrations + run: diesel setup --database-url "mysql://root:mysql@localhost/fang" + + - name: Run tests + run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql --color always -- --nocapture \ No newline at end of file diff --git a/fang/src/asynk/async_queue.rs 
b/fang/src/asynk/async_queue.rs index 08094136..a087dc01 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -13,7 +13,8 @@ use async_trait::async_trait; use chrono::DateTime; use chrono::Utc; use cron::Schedule; -//use sqlx::any::install_default_drivers; +//use sqlx::any::install_default_drivers; // this is supported in sqlx 0.7 +use sqlx::any::AnyKind; use sqlx::pool::PoolOptions; use sqlx::Any; use sqlx::AnyPool; @@ -325,10 +326,9 @@ impl AsyncQueue { let anykind = pool.any_kind(); let backend = match anykind { - sqlx::any::AnyKind::Postgres => BackendSqlX::Pg, - sqlx::any::AnyKind::Sqlite => BackendSqlX::Sqlite, - sqlx::any::AnyKind::MySql => BackendSqlX::Mysql, - _ => BackendSqlX::NoBackend, + AnyKind::Postgres => BackendSqlX::Pg, + AnyKind::Sqlite => BackendSqlX::Sqlite, + AnyKind::MySql => BackendSqlX::Mysql, }; self.backend = backend; diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 9f5a3b78..7c4e6392 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -114,7 +114,7 @@ impl Res { } impl BackendSqlX { - pub fn new_with_name(name: &str) -> BackendSqlX { + pub fn _new_with_name(name: &str) -> BackendSqlX { match name { "PostgreSQL" => BackendSqlX::Pg, "SQLite" => BackendSqlX::Sqlite, @@ -865,7 +865,7 @@ async fn mysql_impl_insert_task_uniq( let query_params = QueryParams::builder().uuid(&uuid).build(); let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, &pool, query_params).await?; + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; Ok(task) } From 192722db440e62c193614166febc6efe1bb1dadd Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 10 Apr 2024 00:40:46 +0200 Subject: [PATCH 52/90] adress comments and fix workflow --- .github/workflows/rust.yml | 5 +- fang/Cargo.toml | 9 +- fang/src/asynk/async_queue.rs | 262 +++++++++++++++++----------------- 3 files 
changed, 136 insertions(+), 140 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index f81fae8a..05328530 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -204,7 +204,7 @@ jobs: - name: Build release run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu - + test_mysql: name: Test mysql runs-on: ubuntu-latest @@ -232,7 +232,8 @@ jobs: - 5432:5432 options: >- - --health-cmd mysqladmin ping + --health-cmd mysqladmin + ping --connection-timeout 5s --wait=5 diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 7728f8d5..35ec4a02 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -15,7 +15,7 @@ rust-version = "1.62" doctest = false [features] -default = ["blocking", "asynk-sqlx", "derive-error", "blocking-postgres", "blocking-mysql" , "blocking-sqlite", "migrations-postgres", "migrations-sqlite" , "migrations-mysql"] +default = ["blocking", "asynk-sqlx", "derive-error", "blocking-postgres", "blocking-mysql" , "blocking-sqlite", "migrations-postgres", "migrations-sqlite", "migrations-mysql"] asynk-postgres = ["asynk-sqlx" , "sqlx?/postgres"] asynk-sqlite = ["asynk-sqlx" , "sqlx?/sqlite"] asynk-mysql = ["asynk-sqlx" , "sqlx?/mysql"] @@ -36,8 +36,7 @@ migrations = ["dep:diesel_migrations"] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} -#sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio", "postgres", "sqlite", "mysql"]} -#console-subscriber = "0.2.0" +#console-subscriber = "0.2.0" # for tokio tracing debug [dependencies] cron = "0.12" @@ -53,10 +52,6 @@ typed-builder = "0.14" typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , optional = true} -# sqlx with no TLS, if you want TLS you 
must to get feature "tls-native-tls" or "tls-rustls" -#sqlx = {version = "0.7", features = ["any" , "macros" , "json" , "uuid" , "chrono" , "runtime-tokio", "postgres", "sqlite", "mysql"] } -# https://github.com/launchbadge/sqlx/issues/2416 is fixed in pxp9's fork -#sqlx = {git = "https://github.com/pxp9/sqlx", branch = "main", features = ["any" , "macros" , "runtime-tokio"] , optional = true} sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls", "postgres", "sqlite", "mysql"], optional = true} [dependencies.diesel] diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index a087dc01..b9b33d1a 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -173,137 +173,6 @@ use std::env; use super::backend_sqlx::BackendSqlX; -#[cfg(test)] -impl AsyncQueue { - /// Provides an AsyncQueue connected to its own DB - pub async fn test_postgres() -> Self { - dotenvy::dotenv().expect(".env file not found"); - let base_url = env::var("POSTGRES_BASE_URL").expect("Base URL for Postgres not found"); - let base_db = env::var("POSTGRES_DB").expect("Name for base Postgres DB not found"); - - let mut res = Self::builder() - .max_pool_size(1_u32) - .uri(format!("{}/{}", base_url, base_db)) - .build(); - - let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; - res.connect().await.unwrap(); - - let db_name = format!("async_queue_test_{}", *new_number); - *new_number += 1; - - let create_query: &str = &format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); - let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - - let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); - - log::info!("Deleting database {db_name} ..."); - conn.execute(delete_query).await.unwrap(); - - log::info!("Creating database {db_name} ..."); - let expected_error: &str = &format!( - "source database \"{}\" is being accessed by other users", - base_db - ); - while let Err(e) = 
conn.execute(create_query).await { - if e.as_database_error().unwrap().message() != expected_error { - panic!("{:?}", e); - } - } - - log::info!("Database {db_name} created !!"); - - res.connected = false; - res.pool = None; - res.uri = format!("{}/{}", base_url, db_name); - res.connect().await.unwrap(); - - res - } - - /// Provides an AsyncQueue connected to its own DB - pub async fn test_sqlite() -> Self { - dotenvy::dotenv().expect(".env file not found"); - let tests_dir = env::var("SQLITE_TESTS_DIR").expect("Name for tests directory not found"); - let base_file = env::var("SQLITE_FILE").expect("Name for SQLite DB file not found"); - let sqlite_file = format!("../{}", base_file); - - let mut new_number = ASYNC_QUEUE_SQLITE_TEST_COUNTER.lock().await; - - let db_name = format!("../{}/async_queue_test_{}.db", tests_dir, *new_number); - *new_number += 1; - - let path = Path::new(&db_name); - - if path.exists() { - log::info!("Deleting database {db_name} ..."); - std::fs::remove_file(path).unwrap(); - } - - log::info!("Creating database {db_name} ..."); - std::fs::copy(sqlite_file, &db_name).unwrap(); - log::info!("Database {db_name} created !!"); - - let mut res = Self::builder() - .max_pool_size(1_u32) - .uri(format!("sqlite://{}", db_name)) - .build(); - - res.connect().await.expect("fail to connect"); - res - } - - /// Provides an AsyncQueue connected to its own DB - pub async fn test_mysql() -> Self { - dotenvy::dotenv().expect(".env file not found"); - let base_url = env::var("MYSQL_BASE_URL").expect("Base URL for MySQL not found"); - let base_db = env::var("MYSQL_DB").expect("Name for base MySQL DB not found"); - - let mut res = Self::builder() - .max_pool_size(1_u32) - .uri(format!("{}/{}", base_url, base_db)) - .build(); - - let mut new_number = ASYNC_QUEUE_MYSQL_TEST_COUNTER.lock().await; - res.connect().await.unwrap(); - - let db_name = format!("async_queue_test_{}", *new_number); - *new_number += 1; - - let create_query: &str = &format!( - "CREATE 
DATABASE {}; CREATE TABLE {}.fang_tasks LIKE fang.fang_tasks;", - db_name, db_name - ); - - let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - - let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); - - log::info!("Deleting database {db_name} ..."); - conn.execute(delete_query).await.unwrap(); - - log::info!("Creating database {db_name} ..."); - let expected_error: &str = &format!( - "source database \"{}\" is being accessed by other users", - base_db - ); - while let Err(e) = conn.execute(create_query).await { - if e.as_database_error().unwrap().message() != expected_error { - panic!("{:?}", e); - } - } - - log::info!("Database {db_name} created !!"); - - res.connected = false; - res.pool = None; - res.uri = format!("{}/{}", base_url, db_name); - res.connect().await.unwrap(); - - res - } -} - impl AsyncQueue { /// Check if the connection with db is established pub fn check_if_connection(&self) -> Result<(), AsyncQueueError> { @@ -673,6 +542,137 @@ impl AsyncQueueable for AsyncQueue { } } +#[cfg(test)] +impl AsyncQueue { + /// Provides an AsyncQueue connected to its own DB + pub async fn test_postgres() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let base_url = env::var("POSTGRES_BASE_URL").expect("Base URL for Postgres not found"); + let base_db = env::var("POSTGRES_DB").expect("Name for base Postgres DB not found"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("{}/{}", base_url, base_db)) + .build(); + + let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; + res.connect().await.unwrap(); + + let db_name = format!("async_queue_test_{}", *new_number); + *new_number += 1; + + let create_query: &str = &format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); + let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); + + let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); + + log::info!("Deleting database {db_name} ..."); + 
conn.execute(delete_query).await.unwrap(); + + log::info!("Creating database {db_name} ..."); + let expected_error: &str = &format!( + "source database \"{}\" is being accessed by other users", + base_db + ); + while let Err(e) = conn.execute(create_query).await { + if e.as_database_error().unwrap().message() != expected_error { + panic!("{:?}", e); + } + } + + log::info!("Database {db_name} created !!"); + + res.connected = false; + res.pool = None; + res.uri = format!("{}/{}", base_url, db_name); + res.connect().await.unwrap(); + + res + } + + /// Provides an AsyncQueue connected to its own DB + pub async fn test_sqlite() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let tests_dir = env::var("SQLITE_TESTS_DIR").expect("Name for tests directory not found"); + let base_file = env::var("SQLITE_FILE").expect("Name for SQLite DB file not found"); + let sqlite_file = format!("../{}", base_file); + + let mut new_number = ASYNC_QUEUE_SQLITE_TEST_COUNTER.lock().await; + + let db_name = format!("../{}/async_queue_test_{}.db", tests_dir, *new_number); + *new_number += 1; + + let path = Path::new(&db_name); + + if path.exists() { + log::info!("Deleting database {db_name} ..."); + std::fs::remove_file(path).unwrap(); + } + + log::info!("Creating database {db_name} ..."); + std::fs::copy(sqlite_file, &db_name).unwrap(); + log::info!("Database {db_name} created !!"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("sqlite://{}", db_name)) + .build(); + + res.connect().await.expect("fail to connect"); + res + } + + /// Provides an AsyncQueue connected to its own DB + pub async fn test_mysql() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let base_url = env::var("MYSQL_BASE_URL").expect("Base URL for MySQL not found"); + let base_db = env::var("MYSQL_DB").expect("Name for base MySQL DB not found"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("{}/{}", base_url, base_db)) + .build(); + + let mut 
new_number = ASYNC_QUEUE_MYSQL_TEST_COUNTER.lock().await; + res.connect().await.unwrap(); + + let db_name = format!("async_queue_test_{}", *new_number); + *new_number += 1; + + let create_query: &str = &format!( + "CREATE DATABASE {}; CREATE TABLE {}.fang_tasks LIKE fang.fang_tasks;", + db_name, db_name + ); + + let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); + + let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); + + log::info!("Deleting database {db_name} ..."); + conn.execute(delete_query).await.unwrap(); + + log::info!("Creating database {db_name} ..."); + let expected_error: &str = &format!( + "source database \"{}\" is being accessed by other users", + base_db + ); + while let Err(e) = conn.execute(create_query).await { + if e.as_database_error().unwrap().message() != expected_error { + panic!("{:?}", e); + } + } + + log::info!("Database {db_name} created !!"); + + res.connected = false; + res.pool = None; + res.uri = format!("{}/{}", base_url, db_name); + res.connect().await.unwrap(); + + res + } +} + #[cfg(test)] test_asynk_queue! 
{postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} #[cfg(test)] From db93dbbbd05a851441e3a2e3b87efe63e045e45f Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 10 Apr 2024 00:54:47 +0200 Subject: [PATCH 53/90] fix mysql healthcheck --- .github/workflows/rust.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 05328530..0f253b2a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -232,10 +232,9 @@ jobs: - 5432:5432 options: >- - --health-cmd mysqladmin - ping - --connection-timeout 5s - --wait=5 + --health-cmd mysqladmin ping + --health-interval 10s + --health-timeout 5s steps: - uses: actions/checkout@v3 From 32905b2719347342440a5b94ec895f24c01de83d Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 10 Apr 2024 01:01:43 +0200 Subject: [PATCH 54/90] fix mysql healthcheck, yeap again --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 0f253b2a..ce0db4f8 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -232,7 +232,7 @@ jobs: - 5432:5432 options: >- - --health-cmd mysqladmin ping + --health-cmd='mysqladmin ping -h localhost -u root -pmysql' --health-interval 10s --health-timeout 5s From 2b30c0a57e4d7b247eb24ff16774bd7c56514a95 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 10 Apr 2024 01:05:58 +0200 Subject: [PATCH 55/90] remove healthcheck --- .github/workflows/rust.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ce0db4f8..ac228f58 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -229,12 +229,7 @@ jobs: # Set health checks to wait until postgres has started ports: - - 5432:5432 - - 
options: >- - --health-cmd='mysqladmin ping -h localhost -u root -pmysql' - --health-interval 10s - --health-timeout 5s + - 3306:3306 steps: - uses: actions/checkout@v3 From 9f0c74dc6b2d234d306cdcad30c0ec2ca40cee05 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 10 Apr 2024 01:16:13 +0200 Subject: [PATCH 56/90] healthcheck is needed , plz work --- .github/workflows/rust.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ac228f58..6dfd7bbc 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -226,7 +226,12 @@ jobs: env: MYSQL_ROOT_PASSWORD: mysql MYSQL_DATABASE: fang - # Set health checks to wait until postgres has started + # here we should check if mysql is ready, but this does not work + options: >- + --health-cmd='$(mysqladmin ping -h localhost -P 3306 -u root --password=mysql 2>&1)' + --health-interval 10s + --health-timeout 5s + --health-retries 5 ports: - 3306:3306 @@ -240,7 +245,7 @@ jobs: - name: Install diesel-cli run: cargo install diesel_cli --no-default-features --features mysql - - name: Setup Postgres db + - name: Setup MySQL db working-directory: ./fang/mysql_migrations run: diesel setup --database-url "mysql://root:mysql@localhost/fang" From 5ed857ffba1d22e5443b26ffa2fb819c7c33cc98 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 10 Apr 2024 01:30:50 +0200 Subject: [PATCH 57/90] plz just work :( --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6dfd7bbc..7b8781c1 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -228,7 +228,7 @@ jobs: MYSQL_DATABASE: fang # here we should check if mysql is ready, but this does not work options: >- - --health-cmd='$(mysqladmin ping -h localhost -P 3306 -u root --password=mysql 2>&1)' + 
--health-cmd "mysqladmin ping -h localhost -u root -pmysql" --health-interval 10s --health-timeout 5s --health-retries 5 From 330539a9f4bbc59105296b2fdc379ee5eade8c12 Mon Sep 17 00:00:00 2001 From: Ayrat Badykov Date: Fri, 12 Apr 2024 08:57:05 +0300 Subject: [PATCH 58/90] try to fix workflow --- .github/workflows/rust.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 7b8781c1..5c7c5cad 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/checkout@v3 - + - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} @@ -31,7 +31,7 @@ jobs: - name: Run clippy run: cargo clippy --verbose --all-targets --all-features -- -D warnings - + test_postgres_blocking: name: Test blocking runs-on: ubuntu-latest @@ -78,7 +78,7 @@ jobs: - name: Run blocking tests run: cargo test "blocking::queue::postgres" --verbose --features blocking --color always -- --nocapture - + - name: Run blocking dirty tests run: cargo test "blocking::worker" --verbose --features blocking -- --ignored @@ -247,7 +247,7 @@ jobs: - name: Setup MySQL db working-directory: ./fang/mysql_migrations - run: diesel setup --database-url "mysql://root:mysql@localhost/fang" + run: diesel setup --database-url "mysql://root:mysql@127.0.0.1/fang" - name: Run tests - run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql --color always -- --nocapture \ No newline at end of file + run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql --color always -- --nocapture From db904e6db64bfe66361b140840befcfbeb46dd55 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 15:58:32 +0200 Subject: [PATCH 59/90] update workflow to use a non deprecated version --- .github/workflows/rust.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 
deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 5c7c5cad..5aca75a5 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -22,7 +22,7 @@ jobs: - stable steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust @@ -64,7 +64,7 @@ jobs: - stable steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} @@ -94,7 +94,7 @@ jobs: - stable steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} @@ -135,7 +135,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} @@ -165,7 +165,7 @@ jobs: - stable steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} @@ -198,7 +198,7 @@ jobs: - stable steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} @@ -237,7 +237,7 @@ jobs: - 3306:3306 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} From e31296d702e8a5523514ebfdd7e1bdcad48d13aa Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:01:46 +0200 Subject: [PATCH 60/90] return an error instead of panicking with fock --- fang/src/asynk/backend_sqlx.rs | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 7c4e6392..074675e5 
100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -811,8 +811,10 @@ async fn mysql_impl_insert_task( .rows_affected(); if affected_rows != 1 { - // here we should return an error - panic!("fock") + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); } let query_params = QueryParams::builder().uuid(&uuid).build(); @@ -858,8 +860,10 @@ async fn mysql_impl_insert_task_uniq( println!("reach here 3"); if affected_rows != 1 { - // here we should return an error - panic!("fock") + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); } let query_params = QueryParams::builder().uuid(&uuid).build(); @@ -893,8 +897,10 @@ async fn mysql_impl_update_task_state( .rows_affected(); if affected_rows != 1 { - // here we should return an error - panic!("fock") + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); } let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); @@ -929,8 +935,10 @@ async fn mysql_impl_fail_task( .rows_affected(); if affected_rows != 1 { - // here we should return an error - panic!("fock") + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); } let query_params = QueryParams::builder().uuid(&id).build(); @@ -971,8 +979,10 @@ async fn mysql_impl_retry_task( .rows_affected(); if affected_rows != 1 { - // here we should return an error - panic!("fock") + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); } let query_params = QueryParams::builder().uuid(&uuid).build(); From 6f85163f5fec7ebb098d9c5493f650c6b864aed4 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:09:40 +0200 Subject: [PATCH 61/90] deleting prints and updating comments --- fang/src/asynk/async_queue/async_queue_tests.rs | 4 +--- fang/src/asynk/backend_sqlx.rs | 10 ---------- fang/src/lib.rs | 6 ++---- 3 files changed, 
3 insertions(+), 17 deletions(-) diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index a9ec1028..fa9e42bd 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -227,9 +227,7 @@ macro_rules! test_asynk_queue { assert_eq!(Some(2), number); assert_eq!(Some("AsyncTask"), type_task); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - - let task = test.fetch_and_touch_task(None).await.unwrap().unwrap(); // This fails if this FOR UPDATE SKIP LOCKED is set in query fetch task type + let task = test.fetch_and_touch_task(None).await.unwrap().unwrap(); let metadata = task.metadata.as_object().unwrap(); let number = metadata["number"].as_u64(); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 074675e5..6fb6e4cc 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -565,12 +565,8 @@ async fn general_any_impl_remove_task_by_metadata( let uniq_hash = calculate_hash(&metadata.to_string()); - println!("{query}"); - let adquire = pool; - println!("Adquire {:?}", adquire); - Ok(sqlx::query(query) .bind(uniq_hash) .execute(adquire) @@ -843,10 +839,6 @@ async fn mysql_impl_insert_task_uniq( let uniq_hash = calculate_hash(&metadata_str); - println!("{} len : {}", uniq_hash, uniq_hash.len()); - - println!("reach here"); - let affected_rows = sqlx::query(query) .bind(uuid_as_str) .bind(metadata_str) @@ -857,8 +849,6 @@ async fn mysql_impl_insert_task_uniq( .await? 
.rows_affected(); - println!("reach here 3"); - if affected_rows != 1 { return Err(AsyncQueueError::ResultError { expected: 1, diff --git a/fang/src/lib.rs b/fang/src/lib.rs index 4a382173..4d0aea7e 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -192,8 +192,7 @@ impl<'a> FromRow<'a, AnyRow> for Task { // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); - // This should be changed when issue https://github.com/launchbadge/sqlx/issues/2416 is fixed - // Fixed in pxp9's fork + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 let error_message: Option = row.get("error_message"); let state_str: &str = row.get("state"); // will work if database cast json to string @@ -202,8 +201,7 @@ impl<'a> FromRow<'a, AnyRow> for Task { let task_type: String = row.get("task_type"); - // This should be changed when issue https://github.com/launchbadge/sqlx/issues/2416 is fixed - // Fixed in pxp9's fork + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 let uniq_hash: Option = row.get("uniq_hash"); let retries: i32 = row.get("retries"); From a468938d3a04dbe0f9864c0611ed7f05ab09e6ab Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 20:48:15 +0200 Subject: [PATCH 62/90] deleting unwraps or justifying them --- fang/src/asynk/async_queue.rs | 8 ++++++-- fang/src/asynk/async_worker.rs | 2 +- fang/src/asynk/backend_sqlx.rs | 16 ++++------------ fang/src/lib.rs | 3 +++ 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index b9b33d1a..983567ac 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -58,8 +58,7 @@ impl From for AsyncQueueError { } /// This trait defines operations for an asynchronous queue. 
-/// The trait can be implemented for different storage BackendSqlXs. -/// For now, the trait is only implemented for PostgreSQL. More BackendSqlXs are planned to be implemented in the future. +/// This is implemented by the `AsyncQueue` struct which uses internally a `AnyPool` of `sqlx` to connect to the database. #[async_trait] pub trait AsyncQueueable: Send { @@ -349,6 +348,7 @@ impl AsyncQueueable for AsyncQueue { task_type: Option, ) -> Result, AsyncQueueError> { self.check_if_connection()?; + // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); let task = Self::fetch_and_touch_task_query(pool, &self.backend, task_type).await?; @@ -358,6 +358,7 @@ impl AsyncQueueable for AsyncQueue { async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; + // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); let metadata = serde_json::to_value(task)?; @@ -386,6 +387,7 @@ impl AsyncQueueable for AsyncQueue { async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; + // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); let task = Self::schedule_task_query(pool, &self.backend, task).await?; @@ -395,6 +397,7 @@ impl AsyncQueueable for AsyncQueue { async fn remove_all_tasks(&mut self) -> Result { self.check_if_connection()?; + // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().build(); @@ -410,6 +413,7 @@ impl AsyncQueueable for AsyncQueue { async fn remove_all_scheduled_tasks(&mut self) -> Result { self.check_if_connection()?; + // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); let query_params = QueryParams::builder().build(); diff --git a/fang/src/asynk/async_worker.rs 
b/fang/src/asynk/async_worker.rs index 90422f4e..7c73227d 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -99,7 +99,7 @@ where { Ok(Some(task)) => { let actual_task: Box = - serde_json::from_value(task.metadata.clone()).unwrap(); + serde_json::from_value(task.metadata.clone())?; let cron = actual_task.cron(); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 6fb6e4cc..e9ed2429 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -114,15 +114,6 @@ impl Res { } impl BackendSqlX { - pub fn _new_with_name(name: &str) -> BackendSqlX { - match name { - "PostgreSQL" => BackendSqlX::Pg, - "SQLite" => BackendSqlX::Sqlite, - "MySQL" => BackendSqlX::Mysql, - _ => unreachable!(), - } - } - pub(crate) async fn execute_query<'a>( &self, query: SqlXQuery, @@ -164,6 +155,9 @@ pub(crate) enum SqlXQuery { InsertTaskIfNotExists, } +// Unwraps by QueryParams are safe because the responsibility is of the caller +// and the caller is the library itself + #[derive(Debug, Clone)] struct BackendSqlXPg {} @@ -565,11 +559,9 @@ async fn general_any_impl_remove_task_by_metadata( let uniq_hash = calculate_hash(&metadata.to_string()); - let adquire = pool; - Ok(sqlx::query(query) .bind(uniq_hash) - .execute(adquire) + .execute(pool) .await? 
.rows_affected()) } diff --git a/fang/src/lib.rs b/fang/src/lib.rs index 4d0aea7e..269518ea 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -208,18 +208,21 @@ impl<'a> FromRow<'a, AnyRow> for Task { let scheduled_at_str: &str = row.get("scheduled_at"); + // This unwrap is safe because we know that the database returns the date in the correct format let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") .unwrap() .into(); let created_at_str: &str = row.get("created_at"); + // This unwrap is safe because we know that the database returns the date in the correct format let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") .unwrap() .into(); let updated_at_str: &str = row.get("updated_at"); + // This unwrap is safe because we know that the database returns the date in the correct format let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") .unwrap() .into(); From 27ea179955d52c69d9849f0e0bb249363deecd7c Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 22:46:14 +0200 Subject: [PATCH 63/90] README and moving each backend to separate files --- fang/Cargo.toml | 2 +- fang/README.md | 24 +- fang/src/asynk/async_queue.rs | 21 +- fang/src/asynk/backend_sqlx.rs | 653 ++---------------------- fang/src/asynk/backend_sqlx/mysql.rs | 351 +++++++++++++ fang/src/asynk/backend_sqlx/postgres.rs | 152 ++++++ fang/src/asynk/backend_sqlx/sqlite.rs | 143 ++++++ 7 files changed, 731 insertions(+), 615 deletions(-) create mode 100644 fang/src/asynk/backend_sqlx/mysql.rs create mode 100644 fang/src/asynk/backend_sqlx/postgres.rs create mode 100644 fang/src/asynk/backend_sqlx/sqlite.rs diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 35ec4a02..638b05fa 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -52,7 +52,7 @@ typed-builder = "0.14" typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , 
optional = true} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls", "postgres", "sqlite", "mysql"], optional = true} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls"], optional = true} [dependencies.diesel] version = "2.1" diff --git a/fang/README.md b/fang/README.md index d4b1e092..8d4ff69a 100644 --- a/fang/README.md +++ b/fang/README.md @@ -4,7 +4,7 @@ # Fang -Background task processing library for Rust. It uses Postgres DB as a task queue. +Background task processing library for Rust. It can use PostgreSQL, SQLite or MySQL as a task queue. ## Key Features @@ -31,32 +31,44 @@ Here are some of the fang's key features: ```toml [dependencies] -fang = { version = "0.10.4" , features = ["blocking"], default-features = false } +fang = { version = "1.0.0" , features = ["blocking"], default-features = false } ``` #### the Asynk feature ```toml [dependencies] -fang = { version = "0.10.4" , features = ["asynk"], default-features = false } +fang = { version = "1.0.0" , features = ["asynk"], default-features = false } ``` #### the Asynk feature with derive macro ```toml [dependencies] -fang = { version = "0.10.4" , features = ["asynk", "derive-error" ], default-features = false } +fang = { version = "1.0.0" , features = ["asynk", "derive-error" ], default-features = false } ``` #### All features ```toml -fang = { version = "0.10.4" } +fang = { version = "1.0.0" } ``` _Supports rustc 1.62+_ -2. Create the `fang_tasks` table in the Postgres database. The migration can be found in [the migrations directory](https://github.com/ayrat555/fang/blob/master/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql). +2. Create the `fang_tasks` table in the database. The migration can be found in [the migrations directory](https://github.com/ayrat555/fang/blob/master/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql). 
+ +Migrations can be also run as code, importing the feature `migrations-{database}` being the `database` the backend queue you want to use. + +```toml +[dependencies] +fang = { version = "1.0.0" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } +``` + +```rust +use fang::run_migrations_postgres; +run_migrations_postgres(&mut connection).unwrap(); +``` ## Usage diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 983567ac..86e01f2a 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -172,6 +172,19 @@ use std::env; use super::backend_sqlx::BackendSqlX; +fn get_backend(_anykind: AnyKind) -> BackendSqlX { + match _anykind { + #[cfg(feature = "asynk-postgres")] + AnyKind::Postgres => BackendSqlX::Pg, + #[cfg(feature = "asynk-mysql")] + AnyKind::MySql => BackendSqlX::MySql, + #[cfg(feature = "asynk-sqlite")] + AnyKind::Sqlite => BackendSqlX::Sqlite, + #[allow(unreachable_patterns)] + _ => unreachable!(), + } +} + impl AsyncQueue { /// Check if the connection with db is established pub fn check_if_connection(&self) -> Result<(), AsyncQueueError> { @@ -193,13 +206,7 @@ impl AsyncQueue { let anykind = pool.any_kind(); - let backend = match anykind { - AnyKind::Postgres => BackendSqlX::Pg, - AnyKind::Sqlite => BackendSqlX::Sqlite, - AnyKind::MySql => BackendSqlX::Mysql, - }; - - self.backend = backend; + self.backend = get_backend(anykind); self.pool = Some(pool); self.connected = true; diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index e9ed2429..02065530 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -1,6 +1,4 @@ -use chrono::DateTime; -use chrono::Duration; -use chrono::Utc; +use chrono::{DateTime, Duration, Utc}; use sha2::Digest; use sha2::Sha256; use sqlx::Any; @@ -9,64 +7,31 @@ use std::fmt::Debug; use typed_builder::TypedBuilder; use uuid::Uuid; -const INSERT_TASK_QUERY_POSTGRES: &str = 
include_str!("queries_postgres/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = include_str!("queries_postgres/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY_POSTGRES: &str = - include_str!("queries_postgres/update_task_state.sql"); -const FAIL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES: &str = - include_str!("queries_postgres/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/remove_task.sql"); -const REMOVE_TASK_BY_METADATA_QUERY_POSTGRES: &str = - include_str!("queries_postgres/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY_POSTGRES: &str = - include_str!("queries_postgres/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY_POSTGRES: &str = include_str!("queries_postgres/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES: &str = - include_str!("queries_postgres/find_task_by_uniq_hash.sql"); -const FIND_TASK_BY_ID_QUERY_POSTGRES: &str = include_str!("queries_postgres/find_task_by_id.sql"); -const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("queries_postgres/retry_task.sql"); - -const INSERT_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY_SQLITE: &str = include_str!("queries_sqlite/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY_SQLITE: &str = include_str!("queries_sqlite/update_task_state.sql"); -const FAIL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE: &str = - include_str!("queries_sqlite/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_task.sql"); -const 
REMOVE_TASK_BY_METADATA_QUERY_SQLITE: &str = - include_str!("queries_sqlite/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY_SQLITE: &str = include_str!("queries_sqlite/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = - include_str!("queries_sqlite/find_task_by_uniq_hash.sql"); -const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("queries_sqlite/find_task_by_id.sql"); -const RETRY_TASK_QUERY_SQLITE: &str = include_str!("queries_sqlite/retry_task.sql"); - -const INSERT_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY_MYSQL: &str = include_str!("queries_mysql/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY_MYSQL: &str = include_str!("queries_mysql/update_task_state.sql"); -const FAIL_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL: &str = - include_str!("queries_mysql/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/remove_task.sql"); -const REMOVE_TASK_BY_METADATA_QUERY_MYSQL: &str = - include_str!("queries_mysql/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY_MYSQL: &str = include_str!("queries_mysql/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY_MYSQL: &str = include_str!("queries_mysql/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL: &str = - include_str!("queries_mysql/find_task_by_uniq_hash.sql"); -const FIND_TASK_BY_ID_QUERY_MYSQL: &str = include_str!("queries_mysql/find_task_by_id.sql"); -const RETRY_TASK_QUERY_MYSQL: &str = include_str!("queries_mysql/retry_task.sql"); +#[cfg(feature = "asynk-postgres")] +mod postgres; +#[cfg(feature = "asynk-postgres")] +use self::postgres::BackendSqlXPg; + +#[cfg(feature = 
"asynk-sqlite")] +mod sqlite; +#[cfg(feature = "asynk-sqlite")] +use self::sqlite::BackendSqlXSQLite; +#[cfg(feature = "asynk-mysql")] +mod mysql; +#[cfg(feature = "asynk-mysql")] +use self::mysql::BackendSqlXMySQL; #[derive(Debug, Clone)] pub(crate) enum BackendSqlX { + #[cfg(feature = "asynk-postgres")] Pg, + + #[cfg(feature = "asynk-sqlite")] Sqlite, - Mysql, + + #[cfg(feature = "asynk-mysql")] + MySql, + NoBackend, } @@ -92,6 +57,7 @@ pub(crate) struct QueryParams<'a> { task: Option<&'a Task>, } +#[allow(dead_code)] pub(crate) enum Res { Bigint(u64), Task(Task), @@ -116,14 +82,17 @@ impl Res { impl BackendSqlX { pub(crate) async fn execute_query<'a>( &self, - query: SqlXQuery, - pool: &Pool, - params: QueryParams<'_>, + _query: SqlXQuery, + _pool: &Pool, + _params: QueryParams<'_>, ) -> Result { match self { - BackendSqlX::Pg => BackendSqlXPg::execute_query(query, pool, params).await, - BackendSqlX::Sqlite => BackendSqlXSQLite::execute_query(query, pool, params).await, - BackendSqlX::Mysql => BackendSqlXMySQL::execute_query(query, pool, params).await, + #[cfg(feature = "asynk-postgres")] + BackendSqlX::Pg => BackendSqlXPg::execute_query(_query, _pool, _params).await, + #[cfg(feature = "asynk-sqlite")] + BackendSqlX::Sqlite => BackendSqlXSQLite::execute_query(_query, _pool, _params).await, + #[cfg(feature = "asynk-mysql")] + BackendSqlX::MySql => BackendSqlXMySQL::execute_query(_query, _pool, _params).await, _ => unreachable!(), } } @@ -131,9 +100,12 @@ impl BackendSqlX { // I think it is useful to have this method, although it is not used pub(crate) fn _name(&self) -> &str { match self { + #[cfg(feature = "asynk-postgres")] BackendSqlX::Pg => BackendSqlXPg::_name(), + #[cfg(feature = "asynk-sqlite")] BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), - BackendSqlX::Mysql => BackendSqlXMySQL::_name(), + #[cfg(feature = "asynk-mysql")] + BackendSqlX::MySql => BackendSqlXMySQL::_name(), _ => unreachable!(), } } @@ -158,229 +130,12 @@ pub(crate) enum SqlXQuery { // 
Unwraps by QueryParams are safe because the responsibility is of the caller // and the caller is the library itself -#[derive(Debug, Clone)] -struct BackendSqlXPg {} - -use SqlXQuery as Q; - +use crate::AsyncQueueError; use crate::AsyncRunnable; use crate::FangTaskState; -use crate::{AsyncQueueError, Task}; -impl BackendSqlXPg { - async fn execute_query( - query: SqlXQuery, - pool: &Pool, - params: QueryParams<'_>, - ) -> Result { - match query { - Q::InsertTask => { - let task = - general_any_impl_insert_task(INSERT_TASK_QUERY_POSTGRES, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::UpdateTaskState => { - let task = general_any_impl_update_task_state( - UPDATE_TASK_STATE_QUERY_POSTGRES, - pool, - params, - ) - .await?; - Ok(Res::Task(task)) - } - Q::FailTask => { - let task = - general_any_impl_fail_task(FAIL_TASK_QUERY_POSTGRES, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::RemoveAllTask => { - let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, pool).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveAllScheduledTask => { - let affected_rows = general_any_impl_remove_all_scheduled_tasks( - REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, - pool, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTask => { - let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, pool, params).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTaskByMetadata => { - let affected_rows = general_any_impl_remove_task_by_metadata( - REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, - pool, - params, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTaskType => { - let affected_rows = general_any_impl_remove_task_type( - REMOVE_TASKS_TYPE_QUERY_POSTGRES, - pool, - params, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::FetchTaskType => { - let task = - general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_POSTGRES, pool, params) - .await?; - Ok(Res::Task(task)) - } - 
Q::FindTaskById => { - let task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_POSTGRES, pool, params) - .await?; - Ok(Res::Task(task)) - } - Q::RetryTask => { - let task = - general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::InsertTaskIfNotExists => { - let task = general_any_impl_insert_task_if_not_exists( - ( - FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, - INSERT_TASK_UNIQ_QUERY_POSTGRES, - ), - pool, - params, - ) - .await?; - - Ok(Res::Task(task)) - } - } - } - - fn _name() -> &'static str { - "PostgreSQL" - } -} - -#[derive(Debug, Clone)] -struct BackendSqlXSQLite {} - -impl BackendSqlXSQLite { - async fn execute_query( - query: SqlXQuery, - pool: &Pool, - params: QueryParams<'_>, - ) -> Result { - match query { - Q::InsertTask => { - let task = - general_any_impl_insert_task(INSERT_TASK_QUERY_SQLITE, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::UpdateTaskState => { - let task = general_any_impl_update_task_state( - UPDATE_TASK_STATE_QUERY_SQLITE, - pool, - params, - ) - .await?; - Ok(Res::Task(task)) - } - Q::FailTask => { - let task = general_any_impl_fail_task(FAIL_TASK_QUERY_SQLITE, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::RemoveAllTask => { - let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, pool).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveAllScheduledTask => { - let affected_rows = general_any_impl_remove_all_scheduled_tasks( - REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, - pool, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTask => { - let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, pool, params).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTaskByMetadata => { - let affected_rows = general_any_impl_remove_task_by_metadata( - REMOVE_TASK_BY_METADATA_QUERY_SQLITE, - pool, - params, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTaskType => { - 
let affected_rows = - general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_SQLITE, pool, params) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::FetchTaskType => { - let task = - general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_SQLITE, pool, params) - .await?; - Ok(Res::Task(task)) - } - Q::FindTaskById => { - let task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_SQLITE, pool, params) - .await?; - Ok(Res::Task(task)) - } - Q::RetryTask => { - let task = - general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::InsertTaskIfNotExists => { - let task = general_any_impl_insert_task_if_not_exists( - ( - FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, - INSERT_TASK_UNIQ_QUERY_SQLITE, - ), - pool, - params, - ) - .await?; - - Ok(Res::Task(task)) - } - } - } - - fn _name() -> &'static str { - "SQLite" - } -} +use crate::Task; +#[allow(dead_code)] async fn general_any_impl_insert_task_if_not_exists( queries: (&str, &str), pool: &Pool, @@ -392,6 +147,7 @@ async fn general_any_impl_insert_task_if_not_exists( } } +#[allow(dead_code)] async fn general_any_impl_insert_task( query: &str, pool: &Pool, @@ -417,6 +173,7 @@ async fn general_any_impl_insert_task( Ok(task) } +#[allow(dead_code)] pub(crate) fn calculate_hash(json: &str) -> String { let mut hasher = Sha256::new(); hasher.update(json.as_bytes()); @@ -424,6 +181,7 @@ pub(crate) fn calculate_hash(json: &str) -> String { hex::encode(result) } +#[allow(dead_code)] async fn general_any_impl_insert_task_uniq( query: &str, pool: &Pool, @@ -453,6 +211,7 @@ async fn general_any_impl_insert_task_uniq( Ok(task) } +#[allow(dead_code)] async fn general_any_impl_update_task_state( query: &str, pool: &Pool, @@ -477,6 +236,7 @@ async fn general_any_impl_update_task_state( Ok(task) } +#[allow(dead_code)] async fn general_any_impl_fail_task( query: &str, pool: &Pool, @@ -502,6 +262,7 @@ async fn general_any_impl_fail_task( Ok(failed_task) } +#[allow(dead_code)] async 
fn general_any_impl_remove_all_task( query: &str, pool: &Pool, @@ -509,6 +270,7 @@ async fn general_any_impl_remove_all_task( Ok(sqlx::query(query).execute(pool).await?.rows_affected()) } +#[allow(dead_code)] async fn general_any_impl_remove_all_scheduled_tasks( query: &str, pool: &Pool, @@ -522,6 +284,7 @@ async fn general_any_impl_remove_all_scheduled_tasks( .rows_affected()) } +#[allow(dead_code)] async fn general_any_impl_remove_task( query: &str, pool: &Pool, @@ -550,6 +313,7 @@ async fn general_any_impl_remove_task( } } +#[allow(dead_code)] async fn general_any_impl_remove_task_by_metadata( query: &str, pool: &Pool, @@ -566,6 +330,7 @@ async fn general_any_impl_remove_task_by_metadata( .rows_affected()) } +#[allow(dead_code)] async fn general_any_impl_remove_task_type( query: &str, pool: &Pool, @@ -580,6 +345,7 @@ async fn general_any_impl_remove_task_type( .rows_affected()) } +#[allow(dead_code)] async fn general_any_impl_fetch_task_type( query: &str, pool: &Pool, @@ -598,6 +364,7 @@ async fn general_any_impl_fetch_task_type( Ok(task) } +#[allow(dead_code)] async fn general_any_impl_find_task_by_uniq_hash( query: &str, pool: &Pool, @@ -614,6 +381,7 @@ async fn general_any_impl_find_task_by_uniq_hash( .ok() } +#[allow(dead_code)] async fn general_any_impl_find_task_by_id( query: &str, pool: &Pool, @@ -634,6 +402,7 @@ async fn general_any_impl_find_task_by_id( Ok(task) } +#[allow(dead_code)] async fn general_any_impl_retry_task( query: &str, pool: &Pool, @@ -667,321 +436,3 @@ async fn general_any_impl_retry_task( Ok(failed_task) } - -#[derive(Debug, Clone)] -struct BackendSqlXMySQL {} - -impl BackendSqlXMySQL { - async fn execute_query( - query: SqlXQuery, - pool: &Pool, - params: QueryParams<'_>, - ) -> Result { - match query { - Q::InsertTask => { - let task = mysql_impl_insert_task(INSERT_TASK_QUERY_MYSQL, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::UpdateTaskState => { - let task = - mysql_impl_update_task_state(UPDATE_TASK_STATE_QUERY_MYSQL, 
pool, params) - .await?; - Ok(Res::Task(task)) - } - - Q::FailTask => { - let task = mysql_impl_fail_task(FAIL_TASK_QUERY_MYSQL, pool, params).await?; - - Ok(Res::Task(task)) - } - - Q::RemoveAllTask => { - let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, pool).await?; - - Ok(Res::Bigint(affected_rows)) - } - - Q::RemoveAllScheduledTask => { - let affected_rows = general_any_impl_remove_all_scheduled_tasks( - REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, - pool, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - - Q::RemoveTask => { - let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, pool, params).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTaskByMetadata => { - let affected_rows = general_any_impl_remove_task_by_metadata( - REMOVE_TASK_BY_METADATA_QUERY_MYSQL, - pool, - params, - ) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveTaskType => { - let affected_rows = - general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_MYSQL, pool, params) - .await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::FetchTaskType => { - let task = - general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_MYSQL, pool, params) - .await?; - Ok(Res::Task(task)) - } - Q::FindTaskById => { - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, params) - .await?; - - Ok(Res::Task(task)) - } - Q::RetryTask => { - let task = mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, pool, params).await?; - - Ok(Res::Task(task)) - } - Q::InsertTaskIfNotExists => { - let task = mysql_any_impl_insert_task_if_not_exists( - ( - FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, - INSERT_TASK_UNIQ_QUERY_MYSQL, - ), - pool, - params, - ) - .await?; - - Ok(Res::Task(task)) - } - } - } - - fn _name() -> &'static str { - "MySQL" - } -} - -async fn mysql_impl_insert_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); 
- let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); - - let metadata_str = params.metadata.unwrap().to_string(); - let task_type = params.task_type.unwrap(); - - let affected_rows = sqlx::query(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(scheduled_at_str) - .execute(pool) - .await? - .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&uuid).build(); - - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(task) -} - -async fn mysql_impl_insert_task_uniq( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let metadata = params.metadata.unwrap(); - - let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); - - let task_type = params.task_type.unwrap(); - - let uniq_hash = calculate_hash(&metadata_str); - - let affected_rows = sqlx::query(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(uniq_hash) - .bind(scheduled_at_str) - .execute(pool) - .await? 
- .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&uuid).build(); - - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(task) -} - -async fn mysql_impl_update_task_state( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - let state_str: &str = params.state.unwrap().into(); - - let uuid = params.uuid.unwrap(); - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - - let affected_rows = sqlx::query(query) - .bind(state_str) - .bind(updated_at_str) - .bind(&*uuid_as_text) - .execute(pool) - .await? - .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); - - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(task) -} - -async fn mysql_impl_fail_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); - - let id = params.task.unwrap().id; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); - - let error_message = params.error_message.unwrap(); - - let affected_rows = sqlx::query(query) - .bind(<&str>::from(FangTaskState::Failed)) - .bind(error_message) - .bind(updated_at) - .bind(&*uuid_as_text) - .execute(pool) - .await? 
- .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&id).build(); - - let failed_task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(failed_task) -} - -async fn mysql_impl_retry_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let now = Utc::now(); - let now_str = format!("{}", now.format("%F %T%.f+00")); - - let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); - let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); - let retries = params.task.unwrap().retries + 1; - - let uuid = params.task.unwrap().id; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - - let error = params.error_message.unwrap(); - - let affected_rows = sqlx::query(query) - .bind(error) - .bind(retries) - .bind(scheduled_at_str) - .bind(now_str) - .bind(&*uuid_as_text) - .execute(pool) - .await? 
- .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&uuid).build(); - - let failed_task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(failed_task) -} - -async fn mysql_any_impl_insert_task_if_not_exists( - queries: (&str, &str), - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - match general_any_impl_find_task_by_uniq_hash(queries.0, pool, ¶ms).await { - Some(task) => Ok(task), - None => mysql_impl_insert_task_uniq(queries.1, pool, params).await, - } -} diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs new file mode 100644 index 00000000..6c49815e --- /dev/null +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -0,0 +1,351 @@ +const INSERT_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_MYSQL: &str = include_str!("../queries_mysql/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_MYSQL: &str = include_str!("../queries_mysql/update_task_state.sql"); +const FAIL_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL: &str = + include_str!("../queries_mysql/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_MYSQL: &str = + include_str!("../queries_mysql/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_MYSQL: &str = include_str!("../queries_mysql/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_MYSQL: &str = include_str!("../queries_mysql/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL: &str = + include_str!("../queries_mysql/find_task_by_uniq_hash.sql"); +const 
FIND_TASK_BY_ID_QUERY_MYSQL: &str = include_str!("../queries_mysql/find_task_by_id.sql"); +const RETRY_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/retry_task.sql"); + +use super::general_any_impl_fetch_task_type; +use super::general_any_impl_find_task_by_id; +use super::general_any_impl_find_task_by_uniq_hash; +use super::general_any_impl_remove_all_scheduled_tasks; +use super::general_any_impl_remove_all_task; +use super::general_any_impl_remove_task; +use super::general_any_impl_remove_task_by_metadata; +use super::general_any_impl_remove_task_type; +use super::{calculate_hash, QueryParams, Res, SqlXQuery}; +use crate::{AsyncQueueError, FangTaskState, Task}; +use SqlXQuery as Q; + +use chrono::Duration; +use chrono::Utc; +use sqlx::{Any, Pool}; +use uuid::Uuid; + +#[derive(Debug, Clone)] +pub(crate) struct BackendSqlXMySQL {} + +impl BackendSqlXMySQL { + pub(super) async fn execute_query( + query: SqlXQuery, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match query { + Q::InsertTask => { + let task = mysql_impl_insert_task(INSERT_TASK_QUERY_MYSQL, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = + mysql_impl_update_task_state(UPDATE_TASK_STATE_QUERY_MYSQL, pool, params) + .await?; + Ok(Res::Task(task)) + } + + Q::FailTask => { + let task = mysql_impl_fail_task(FAIL_TASK_QUERY_MYSQL, pool, params).await?; + + Ok(Res::Task(task)) + } + + Q::RemoveAllTask => { + let affected_rows = + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, pool).await?; + + Ok(Res::Bigint(affected_rows)) + } + + Q::RemoveAllScheduledTask => { + let affected_rows = general_any_impl_remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + + Q::RemoveTask => { + let affected_rows = + general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, pool, params).await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let 
affected_rows = general_any_impl_remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = + general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_MYSQL, pool, params) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::FetchTaskType => { + let task = + general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_MYSQL, pool, params) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskById => { + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, params) + .await?; + + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = mysql_any_impl_insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, + INSERT_TASK_UNIQ_QUERY_MYSQL, + ), + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + } + } + + pub(super) fn _name() -> &'static str { + "MySQL" + } +} + +async fn mysql_impl_insert_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, +) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let metadata_str = params.metadata.unwrap().to_string(); + let task_type = params.task_type.unwrap(); + + let affected_rows = sqlx::query(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(scheduled_at_str) + .execute(pool) + .await? 
+ .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; + + Ok(task) +} + +async fn mysql_impl_insert_task_uniq( + query: &str, + pool: &Pool, + params: QueryParams<'_>, +) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let metadata = params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let affected_rows = sqlx::query(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at_str) + .execute(pool) + .await? + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; + + Ok(task) +} + +async fn mysql_impl_update_task_state( + query: &str, + pool: &Pool, + params: QueryParams<'_>, +) -> Result { + let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + let state_str: &str = params.state.unwrap().into(); + + let uuid = params.uuid.unwrap(); + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let affected_rows = sqlx::query(query) + .bind(state_str) + .bind(updated_at_str) + .bind(&*uuid_as_text) + .execute(pool) + .await? 
+ .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); + + let task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; + + Ok(task) +} + +async fn mysql_impl_fail_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, +) -> Result { + let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + + let id = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + + let error_message = params.error_message.unwrap(); + + let affected_rows = sqlx::query(query) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(&*uuid_as_text) + .execute(pool) + .await? + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&id).build(); + + let failed_task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; + + Ok(failed_task) +} + +async fn mysql_impl_retry_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, +) -> Result { + let now = Utc::now(); + let now_str = format!("{}", now.format("%F %T%.f+00")); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let retries = params.task.unwrap().retries + 1; + + let uuid = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let error = params.error_message.unwrap(); + + let affected_rows = sqlx::query(query) + .bind(error) + .bind(retries) + .bind(scheduled_at_str) + .bind(now_str) + 
.bind(&*uuid_as_text) + .execute(pool) + .await? + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let failed_task: Task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; + + Ok(failed_task) +} + +async fn mysql_any_impl_insert_task_if_not_exists( + queries: (&str, &str), + pool: &Pool, + params: QueryParams<'_>, +) -> Result { + match general_any_impl_find_task_by_uniq_hash(queries.0, pool, ¶ms).await { + Some(task) => Ok(task), + None => mysql_impl_insert_task_uniq(queries.1, pool, params).await, + } +} diff --git a/fang/src/asynk/backend_sqlx/postgres.rs b/fang/src/asynk/backend_sqlx/postgres.rs new file mode 100644 index 00000000..a5185a1a --- /dev/null +++ b/fang/src/asynk/backend_sqlx/postgres.rs @@ -0,0 +1,152 @@ +const INSERT_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/update_task_state.sql"); +const FAIL_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/fetch_task_type.sql"); 
+const FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/find_task_by_id.sql"); +const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/retry_task.sql"); + +#[derive(Debug, Clone)] +pub(super) struct BackendSqlXPg {} + +use SqlXQuery as Q; + +use crate::AsyncQueueError; + +use super::general_any_impl_fail_task; +use super::general_any_impl_fetch_task_type; +use super::general_any_impl_find_task_by_id; +use super::general_any_impl_insert_task; +use super::general_any_impl_insert_task_if_not_exists; +use super::general_any_impl_remove_all_scheduled_tasks; +use super::general_any_impl_remove_all_task; +use super::general_any_impl_remove_task; +use super::general_any_impl_remove_task_by_metadata; +use super::general_any_impl_remove_task_type; +use super::general_any_impl_retry_task; +use super::general_any_impl_update_task_state; +use super::{QueryParams, Res, SqlXQuery}; +use sqlx::{Any, Pool}; + +impl BackendSqlXPg { + pub(super) async fn execute_query( + query: SqlXQuery, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match query { + Q::InsertTask => { + let task = + general_any_impl_insert_task(INSERT_TASK_QUERY_POSTGRES, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = general_any_impl_update_task_state( + UPDATE_TASK_STATE_QUERY_POSTGRES, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FailTask => { + let task = + general_any_impl_fail_task(FAIL_TASK_QUERY_POSTGRES, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::RemoveAllTask => { + let affected_rows = + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, pool).await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveAllScheduledTask => { + let affected_rows = general_any_impl_remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + pool, + ) + 
.await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTask => { + let affected_rows = + general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, pool, params).await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = general_any_impl_remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = general_any_impl_remove_task_type( + REMOVE_TASKS_TYPE_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::FetchTaskType => { + let task = + general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_POSTGRES, pool, params) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskById => { + let task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_POSTGRES, pool, params) + .await?; + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = + general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = general_any_impl_insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + INSERT_TASK_UNIQ_QUERY_POSTGRES, + ), + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + } + } + + pub(super) fn _name() -> &'static str { + "PostgreSQL" + } +} diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs new file mode 100644 index 00000000..af37a341 --- /dev/null +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -0,0 +1,143 @@ +const INSERT_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_SQLITE: &str = include_str!("../queries_sqlite/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/update_task_state.sql"); +const FAIL_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/fail_task.sql"); +const 
REMOVE_ALL_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_SQLITE: &str = include_str!("../queries_sqlite/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("../queries_sqlite/find_task_by_id.sql"); +const RETRY_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/retry_task.sql"); + +#[derive(Debug, Clone)] +pub(super) struct BackendSqlXSQLite {} + +use super::{QueryParams, Res, SqlXQuery}; +use crate::AsyncQueueError; +use sqlx::{Any, Pool}; +use SqlXQuery as Q; + +use super::general_any_impl_fail_task; +use super::general_any_impl_fetch_task_type; +use super::general_any_impl_find_task_by_id; +use super::general_any_impl_insert_task; +use super::general_any_impl_insert_task_if_not_exists; +use super::general_any_impl_remove_all_scheduled_tasks; +use super::general_any_impl_remove_all_task; +use super::general_any_impl_remove_task; +use super::general_any_impl_remove_task_by_metadata; +use super::general_any_impl_remove_task_type; +use super::general_any_impl_retry_task; +use super::general_any_impl_update_task_state; + +impl BackendSqlXSQLite { + pub(super) async fn execute_query( + query: SqlXQuery, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match query { + Q::InsertTask => { + let task = + general_any_impl_insert_task(INSERT_TASK_QUERY_SQLITE, pool, params).await?; + + Ok(Res::Task(task)) + } + 
Q::UpdateTaskState => { + let task = general_any_impl_update_task_state( + UPDATE_TASK_STATE_QUERY_SQLITE, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FailTask => { + let task = general_any_impl_fail_task(FAIL_TASK_QUERY_SQLITE, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::RemoveAllTask => { + let affected_rows = + general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, pool).await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveAllScheduledTask => { + let affected_rows = general_any_impl_remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTask => { + let affected_rows = + general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, pool, params).await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = general_any_impl_remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = + general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_SQLITE, pool, params) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::FetchTaskType => { + let task = + general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_SQLITE, pool, params) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskById => { + let task = + general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_SQLITE, pool, params) + .await?; + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = + general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, pool, params).await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = general_any_impl_insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + INSERT_TASK_UNIQ_QUERY_SQLITE, + ), + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + } + } + + pub(super) fn _name() -> &'static str { + "SQLite" + } +} From 
46540dbb10e76d7913d185bb73eb37edddd6e856 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 22:59:43 +0200 Subject: [PATCH 64/90] final README --- fang/README.md | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/fang/README.md b/fang/README.md index 8d4ff69a..ab0f3613 100644 --- a/fang/README.md +++ b/fang/README.md @@ -36,16 +36,34 @@ fang = { version = "1.0.0" , features = ["blocking"], default-features = false } #### the Asynk feature +- PostgreSQL as a queue + +```toml +[dependencies] +fang = { version = "1.0.0" , features = ["asynk-postgres"], default-features = false } +``` + +- SQLite as a queue + ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk"], default-features = false } +fang = { version = "1.0.0" , features = ["asynk-sqlite"], default-features = false } +``` + +- MySQL as a queue + +```toml +[dependencies] +fang = { version = "1.0.0" , features = ["asynk-mysql"], default-features = false } ``` #### the Asynk feature with derive macro +Substitute `database` with your desired backend. + ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk", "derive-error" ], default-features = false } +fang = { version = "1.0.0" , features = ["asynk-{database}", "derive-error" ], default-features = false } ``` #### All features @@ -259,7 +277,6 @@ For Postgres backend: ```rust use fang::asynk::async_queue::AsyncQueue; -use fang::NoTls; use fang::AsyncRunnable; // Create an AsyncQueue @@ -273,10 +290,10 @@ let mut queue = AsyncQueue::builder() .build(); // Always connect first in order to perform any operation -queue.connect(NoTls).await.unwrap(); +queue.connect().await.unwrap(); ``` -As an easy example, we are using NoTls type. 
If for some reason you would like to encrypt Postgres requests, you can use [openssl](https://docs.rs/postgres-openssl/latest/postgres_openssl/) or [native-tls](https://docs.rs/postgres-native-tls/latest/postgres_native_tls/). +Encryption is always used with crate `rustls`, if you want to not use encryption, you can issue us to re-export the sqlx feature with no encryption. ```rust // AsyncTask from the first example @@ -322,7 +339,7 @@ use fang::asynk::async_worker_pool::AsyncWorkerPool; // Need to create a queue // Also insert some tasks -let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() +let mut pool: AsyncWorkerPool = AsyncWorkerPool::builder() .number_of_workers(max_pool_size) .queue(queue.clone()) // if you want to run tasks of the specific kind From 665ac478397526bda812b1af02b5fb16dcaaa7e3 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Fri, 12 Apr 2024 23:01:34 +0200 Subject: [PATCH 65/90] bump fang version and bump rust version --- fang/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 638b05fa..6487ee18 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "fang" -version = "0.10.4" +version = "1.0.0" authors = ["Ayrat Badykov " , "Pepe Márquez "] description = "Background job processing library for Rust" repository = "https://github.com/ayrat555/fang" edition = "2021" license = "MIT" readme = "README.md" -rust-version = "1.62" +rust-version = "1.77" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From bd1e1a8e3a953324f620daa24c695a391c4920b7 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Sat, 13 Apr 2024 20:47:52 +0200 Subject: [PATCH 66/90] nit: added missing newline for consistency --- fang/src/asynk/async_queue.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 86e01f2a..a9c1b184 100644 
--- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -686,6 +686,7 @@ impl AsyncQueue { #[cfg(test)] test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} + #[cfg(test)] test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} From a291459247d9b7f9636067f6c10af68ea0063b09 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Sat, 13 Apr 2024 21:13:08 +0200 Subject: [PATCH 67/90] update readme --- fang/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fang/README.md b/fang/README.md index ab0f3613..222f078c 100644 --- a/fang/README.md +++ b/fang/README.md @@ -293,7 +293,7 @@ let mut queue = AsyncQueue::builder() queue.connect().await.unwrap(); ``` -Encryption is always used with crate `rustls`, if you want to not use encryption, you can issue us to re-export the sqlx feature with no encryption. +Encryption is always used with crate `rustls`. We plan to add the possibility of disabling it in the future. 
```rust // AsyncTask from the first example From 108d55c7208b2b44d90e12baad1a635a724ff259 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Sat, 13 Apr 2024 21:17:36 +0200 Subject: [PATCH 68/90] bump fang version following semver --- fang/Cargo.toml | 4 ++-- fang/README.md | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 6487ee18..fd83b2ad 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fang" -version = "1.0.0" +version = "0.11.0" authors = ["Ayrat Badykov " , "Pepe Márquez "] description = "Background job processing library for Rust" repository = "https://github.com/ayrat555/fang" @@ -84,4 +84,4 @@ optional = true [dependencies.diesel_migrations] version = "2.1.0" optional = true -default-features = false \ No newline at end of file +default-features = false diff --git a/fang/README.md b/fang/README.md index 222f078c..7a802835 100644 --- a/fang/README.md +++ b/fang/README.md @@ -31,7 +31,7 @@ Here are some of the fang's key features: ```toml [dependencies] -fang = { version = "1.0.0" , features = ["blocking"], default-features = false } +fang = { version = "0.11.0" , features = ["blocking"], default-features = false } ``` #### the Asynk feature @@ -40,21 +40,21 @@ fang = { version = "1.0.0" , features = ["blocking"], default-features = false } ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk-postgres"], default-features = false } +fang = { version = "0.11.0" , features = ["asynk-postgres"], default-features = false } ``` - SQLite as a queue ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk-sqlite"], default-features = false } +fang = { version = "0.11.0" , features = ["asynk-sqlite"], default-features = false } ``` - MySQL as a queue ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk-mysql"], default-features = false } +fang = { version = "0.11.0" , features = ["asynk-mysql"], default-features 
= false } ``` #### the Asynk feature with derive macro @@ -63,13 +63,13 @@ Substitute `database` with your desired backend. ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk-{database}", "derive-error" ], default-features = false } +fang = { version = "0.11.0" , features = ["asynk-{database}", "derive-error" ], default-features = false } ``` #### All features ```toml -fang = { version = "1.0.0" } +fang = { version = "0.11.0" } ``` _Supports rustc 1.62+_ @@ -80,7 +80,7 @@ Migrations can be also run as code, importing the feature `migrations-{database} ```toml [dependencies] -fang = { version = "1.0.0" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } +fang = { version = "0.11.0" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } ``` ```rust From 0e591757ab53a4e7c1d54d140b004d03264659d9 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Sun, 14 Apr 2024 12:54:41 +0200 Subject: [PATCH 69/90] address ayrat issues --- fang/Cargo.toml | 2 +- fang/README.md | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index fd83b2ad..39caf2cb 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fang" -version = "0.11.0" +version = "0.11.0-rc0" authors = ["Ayrat Badykov " , "Pepe Márquez "] description = "Background job processing library for Rust" repository = "https://github.com/ayrat555/fang" diff --git a/fang/README.md b/fang/README.md index 7a802835..c40fdc1f 100644 --- a/fang/README.md +++ b/fang/README.md @@ -31,7 +31,7 @@ Here are some of the fang's key features: ```toml [dependencies] -fang = { version = "0.11.0" , features = ["blocking"], default-features = false } +fang = { version = "0.11" , features = ["blocking"], default-features = false } ``` #### the Asynk feature @@ -40,21 +40,21 @@ fang = { version = "0.11.0" , features = ["blocking"], 
default-features = false ```toml [dependencies] -fang = { version = "0.11.0" , features = ["asynk-postgres"], default-features = false } +fang = { version = "0.11" , features = ["asynk-postgres"], default-features = false } ``` - SQLite as a queue ```toml [dependencies] -fang = { version = "0.11.0" , features = ["asynk-sqlite"], default-features = false } +fang = { version = "0.11" , features = ["asynk-sqlite"], default-features = false } ``` - MySQL as a queue ```toml [dependencies] -fang = { version = "0.11.0" , features = ["asynk-mysql"], default-features = false } +fang = { version = "0.11" , features = ["asynk-mysql"], default-features = false } ``` #### the Asynk feature with derive macro @@ -63,24 +63,24 @@ Substitute `database` with your desired backend. ```toml [dependencies] -fang = { version = "0.11.0" , features = ["asynk-{database}", "derive-error" ], default-features = false } +fang = { version = "0.11" , features = ["asynk-{database}", "derive-error" ], default-features = false } ``` #### All features ```toml -fang = { version = "0.11.0" } +fang = { version = "0.11" } ``` _Supports rustc 1.62+_ -2. Create the `fang_tasks` table in the database. The migration can be found in [the migrations directory](https://github.com/ayrat555/fang/blob/master/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql). +1. Create the `fang_tasks` table in the database. The migration of each database can be found in `fang/{database}-migrations` where `database` is `postgres`, `mysql` or `sqlite`. Migrations can be also run as code, importing the feature `migrations-{database}` being the `database` the backend queue you want to use. 
```toml [dependencies] -fang = { version = "0.11.0" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } +fang = { version = "0.11" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } ``` ```rust From 7ec0e799d301897ea8c6444c91cf3b84c38a372e Mon Sep 17 00:00:00 2001 From: Pmarquez <48651252+pxp9@users.noreply.github.com> Date: Mon, 15 Apr 2024 09:30:51 +0200 Subject: [PATCH 70/90] Fix/independent decoding (#149) * independent decoding * Postgres and Sqlite passing * fix unreachable pattern warn * delete vscode stuff * I think this may be a Rust compiler issue, but I fixed it so I will take it * fix clippy --- fang/src/asynk/async_queue.rs | 167 ++++- .../asynk/async_queue/async_queue_tests.rs | 2 +- fang/src/asynk/backend_sqlx.rs | 575 ++++++++------- fang/src/asynk/backend_sqlx/mysql.rs | 671 +++++++++++------- fang/src/asynk/backend_sqlx/postgres.rs | 174 +++-- fang/src/asynk/backend_sqlx/sqlite.rs | 177 +++-- 6 files changed, 1112 insertions(+), 654 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index a9c1b184..013670aa 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -13,17 +13,35 @@ use async_trait::async_trait; use chrono::DateTime; use chrono::Utc; use cron::Schedule; -//use sqlx::any::install_default_drivers; // this is supported in sqlx 0.7 +use sqlx::any::AnyConnectOptions; use sqlx::any::AnyKind; +#[cfg(any( + feature = "asynk-postgres", + feature = "asynk-mysql", + feature = "asynk-sqlite" +))] use sqlx::pool::PoolOptions; -use sqlx::Any; -use sqlx::AnyPool; -use sqlx::Pool; +//use sqlx::any::install_default_drivers; // this is supported in sqlx 0.7 use std::str::FromStr; use thiserror::Error; use typed_builder::TypedBuilder; use uuid::Uuid; +#[cfg(feature = "asynk-postgres")] +use sqlx::PgPool; +#[cfg(feature = "asynk-postgres")] +use sqlx::Postgres; + +#[cfg(feature = "asynk-mysql")] +use sqlx::MySql; +#[cfg(feature 
= "asynk-mysql")] +use sqlx::MySqlPool; + +#[cfg(feature = "asynk-sqlite")] +use sqlx::Sqlite; +#[cfg(feature = "asynk-sqlite")] +use sqlx::SqlitePool; + #[cfg(test)] use self::async_queue_tests::test_asynk_queue; @@ -134,11 +152,51 @@ pub trait AsyncQueueable: Send { /// .build(); /// ``` /// +/// + +#[derive(Debug, Clone)] +pub(crate) enum InternalPool { + #[cfg(feature = "asynk-postgres")] + Pg(PgPool), + #[cfg(feature = "asynk-mysql")] + MySql(MySqlPool), + #[cfg(feature = "asynk-sqlite")] + Sqlite(SqlitePool), +} + +impl InternalPool { + #[cfg(feature = "asynk-postgres")] + pub(crate) fn unwrap_pg_pool(&self) -> &PgPool { + match self { + InternalPool::Pg(pool) => pool, + #[allow(unreachable_patterns)] + _ => panic!("Not a PgPool!"), + } + } + + #[cfg(feature = "asynk-mysql")] + pub(crate) fn unwrap_mysql_pool(&self) -> &MySqlPool { + match self { + InternalPool::MySql(pool) => pool, + #[allow(unreachable_patterns)] + _ => panic!("Not a MySqlPool!"), + } + } + + #[cfg(feature = "asynk-sqlite")] + pub(crate) fn unwrap_sqlite_pool(&self) -> &SqlitePool { + match self { + InternalPool::Sqlite(pool) => pool, + #[allow(unreachable_patterns)] + _ => panic!("Not a SqlitePool!"), + } + } +} #[derive(TypedBuilder, Debug, Clone)] pub struct AsyncQueue { #[builder(default=None, setter(skip))] - pool: Option, + pool: Option, #[builder(setter(into))] uri: String, #[builder(setter(into))] @@ -152,19 +210,19 @@ pub struct AsyncQueue { #[cfg(test)] use tokio::sync::Mutex; -#[cfg(test)] +#[cfg(all(test, feature = "asynk-postgres"))] static ASYNC_QUEUE_POSTGRES_TEST_COUNTER: Mutex = Mutex::const_new(0); -#[cfg(test)] +#[cfg(all(test, feature = "asynk-sqlite"))] static ASYNC_QUEUE_SQLITE_TEST_COUNTER: Mutex = Mutex::const_new(0); -#[cfg(test)] +#[cfg(all(test, feature = "asynk-mysql"))] static ASYNC_QUEUE_MYSQL_TEST_COUNTER: Mutex = Mutex::const_new(0); #[cfg(test)] use sqlx::Executor; -#[cfg(test)] +#[cfg(all(test, feature = "asynk-sqlite"))] use std::path::Path; #[cfg(test)] 
@@ -172,16 +230,41 @@ use std::env; use super::backend_sqlx::BackendSqlX; -fn get_backend(_anykind: AnyKind) -> BackendSqlX { - match _anykind { +async fn get_backend( + kind: AnyKind, + _uri: &str, + _max_connections: u32, +) -> Result<(BackendSqlX, InternalPool), AsyncQueueError> { + match kind { #[cfg(feature = "asynk-postgres")] - AnyKind::Postgres => BackendSqlX::Pg, + AnyKind::Postgres => { + let pool = PoolOptions::::new() + .max_connections(_max_connections) + .connect(_uri) + .await?; + + Ok((BackendSqlX::Pg, InternalPool::Pg(pool))) + } #[cfg(feature = "asynk-mysql")] - AnyKind::MySql => BackendSqlX::MySql, + AnyKind::MySql => { + let pool = PoolOptions::::new() + .max_connections(_max_connections) + .connect(_uri) + .await?; + + Ok((BackendSqlX::MySql, InternalPool::MySql(pool))) + } #[cfg(feature = "asynk-sqlite")] - AnyKind::Sqlite => BackendSqlX::Sqlite, + AnyKind::Sqlite => { + let pool = PoolOptions::::new() + .max_connections(_max_connections) + .connect(_uri) + .await?; + + Ok((BackendSqlX::Sqlite, InternalPool::Sqlite(pool))) + } #[allow(unreachable_patterns)] - _ => unreachable!(), + _ => panic!("Not a valid backend"), } } @@ -199,22 +282,18 @@ impl AsyncQueue { pub async fn connect(&mut self) -> Result<(), AsyncQueueError> { //install_default_drivers(); - let pool: AnyPool = PoolOptions::new() - .max_connections(self.max_pool_size) - .connect(&self.uri) - .await?; + let kind: AnyKind = self.uri.parse::()?.kind(); - let anykind = pool.any_kind(); - - self.backend = get_backend(anykind); + let (backend, pool) = get_backend(kind, &self.uri, self.max_pool_size).await?; self.pool = Some(pool); + self.backend = backend; self.connected = true; Ok(()) } async fn fetch_and_touch_task_query( - pool: &Pool, + pool: &InternalPool, backend: &BackendSqlX, task_type: Option, ) -> Result, AsyncQueueError> { @@ -250,7 +329,7 @@ impl AsyncQueue { } async fn insert_task_query( - pool: &Pool, + pool: &InternalPool, backend: &BackendSqlX, metadata: 
&serde_json::Value, task_type: &str, @@ -271,7 +350,7 @@ impl AsyncQueue { } async fn insert_task_if_not_exist_query( - pool: &Pool, + pool: &InternalPool, backend: &BackendSqlX, metadata: &serde_json::Value, task_type: &str, @@ -292,7 +371,7 @@ impl AsyncQueue { } async fn schedule_task_query( - pool: &Pool, + pool: &InternalPool, backend: &BackendSqlX, task: &dyn AsyncRunnable, ) -> Result { @@ -553,7 +632,7 @@ impl AsyncQueueable for AsyncQueue { } } -#[cfg(test)] +#[cfg(all(test, feature = "asynk-postgres"))] impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_postgres() -> Self { @@ -575,7 +654,14 @@ impl AsyncQueue { let create_query: &str = &format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); + let mut conn = res + .pool + .as_mut() + .unwrap() + .unwrap_pg_pool() + .acquire() + .await + .unwrap(); log::info!("Deleting database {db_name} ..."); conn.execute(delete_query).await.unwrap(); @@ -600,7 +686,10 @@ impl AsyncQueue { res } +} +#[cfg(all(test, feature = "asynk-sqlite"))] +impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_sqlite() -> Self { dotenvy::dotenv().expect(".env file not found"); @@ -632,7 +721,10 @@ impl AsyncQueue { res.connect().await.expect("fail to connect"); res } +} +#[cfg(all(test, feature = "asynk-mysql"))] +impl AsyncQueue { /// Provides an AsyncQueue connected to its own DB pub async fn test_mysql() -> Self { dotenvy::dotenv().expect(".env file not found"); @@ -657,7 +749,14 @@ impl AsyncQueue { let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - let mut conn = res.pool.as_mut().unwrap().acquire().await.unwrap(); + let mut conn = res + .pool + .as_mut() + .unwrap() + .unwrap_mysql_pool() + .acquire() + .await + .unwrap(); log::info!("Deleting database {db_name} ..."); 
conn.execute(delete_query).await.unwrap(); @@ -684,11 +783,11 @@ impl AsyncQueue { } } -#[cfg(test)] -test_asynk_queue! {postgres, crate::AsyncQueue, crate::AsyncQueue::test_postgres()} +#[cfg(all(test, feature = "asynk-postgres"))] +test_asynk_queue! {postgres, crate::AsyncQueue,crate::AsyncQueue::test_postgres()} -#[cfg(test)] -test_asynk_queue! {sqlite, crate::AsyncQueue, crate::AsyncQueue::test_sqlite()} +#[cfg(all(test, feature = "asynk-sqlite"))] +test_asynk_queue! {sqlite, crate::AsyncQueue,crate::AsyncQueue::test_sqlite()} -#[cfg(test)] +#[cfg(all(test, feature = "asynk-mysql"))] test_asynk_queue! {mysql, crate::AsyncQueue, crate::AsyncQueue::test_mysql()} diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index fa9e42bd..62e836b3 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -113,7 +113,7 @@ macro_rules! test_asynk_queue { } #[tokio::test] - async fn failed_task_query_test() { + async fn failed_task_test() { let mut test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 02065530..10e4dae4 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -1,8 +1,15 @@ use chrono::{DateTime, Duration, Utc}; use sha2::Digest; use sha2::Sha256; -use sqlx::Any; +use sqlx::any::AnyQueryResult; +use sqlx::database::HasArguments; +use sqlx::Database; +use sqlx::Encode; +use sqlx::Executor; +use sqlx::FromRow; +use sqlx::IntoArguments; use sqlx::Pool; +use sqlx::Type; use std::fmt::Debug; use typed_builder::TypedBuilder; use uuid::Uuid; @@ -35,6 +42,7 @@ pub(crate) enum BackendSqlX { NoBackend, } +#[allow(dead_code)] #[derive(TypedBuilder, Clone)] pub(crate) struct QueryParams<'a> { #[builder(default, setter(strip_option))] @@ -80,19 +88,25 @@ impl Res { } impl BackendSqlX { - pub(crate) async fn 
execute_query<'a>( + pub(crate) async fn execute_query( &self, _query: SqlXQuery, - _pool: &Pool, + _pool: &InternalPool, _params: QueryParams<'_>, ) -> Result { match self { #[cfg(feature = "asynk-postgres")] - BackendSqlX::Pg => BackendSqlXPg::execute_query(_query, _pool, _params).await, + BackendSqlX::Pg => { + BackendSqlXPg::execute_query(_query, _pool.unwrap_pg_pool(), _params).await + } #[cfg(feature = "asynk-sqlite")] - BackendSqlX::Sqlite => BackendSqlXSQLite::execute_query(_query, _pool, _params).await, + BackendSqlX::Sqlite => { + BackendSqlXSQLite::execute_query(_query, _pool.unwrap_sqlite_pool(), _params).await + } #[cfg(feature = "asynk-mysql")] - BackendSqlX::MySql => BackendSqlXMySQL::execute_query(_query, _pool, _params).await, + BackendSqlX::MySql => { + BackendSqlXMySQL::execute_query(_query, _pool.unwrap_mysql_pool(), _params).await + } _ => unreachable!(), } } @@ -133,46 +147,9 @@ pub(crate) enum SqlXQuery { use crate::AsyncQueueError; use crate::AsyncRunnable; use crate::FangTaskState; +use crate::InternalPool; use crate::Task; -#[allow(dead_code)] -async fn general_any_impl_insert_task_if_not_exists( - queries: (&str, &str), - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - match general_any_impl_find_task_by_uniq_hash(queries.0, pool, ¶ms).await { - Some(task) => Ok(task), - None => general_any_impl_insert_task_uniq(queries.1, pool, params).await, - } -} - -#[allow(dead_code)] -async fn general_any_impl_insert_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); - - let metadata_str = params.metadata.unwrap().to_string(); - let task_type = params.task_type.unwrap(); - - let task: Task = sqlx::query_as(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - 
.bind(scheduled_at_str) - .fetch_one(pool) - .await?; - - Ok(task) -} - #[allow(dead_code)] pub(crate) fn calculate_hash(json: &str) -> String { let mut hasher = Sha256::new(); @@ -181,258 +158,304 @@ pub(crate) fn calculate_hash(json: &str) -> String { hex::encode(result) } -#[allow(dead_code)] -async fn general_any_impl_insert_task_uniq( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let metadata = params.metadata.unwrap(); - - let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); - - let task_type = params.task_type.unwrap(); - - let uniq_hash = calculate_hash(&metadata_str); - - let task: Task = sqlx::query_as(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(uniq_hash) - .bind(scheduled_at_str) - .fetch_one(pool) - .await?; - Ok(task) -} +trait FangQueryable +where + DB: Database, + for<'r> Task: FromRow<'r, ::Row>, + for<'r> std::string::String: Encode<'r, DB> + Type, + for<'r> &'r str: Encode<'r, DB> + Type, + for<'r> i32: Encode<'r, DB> + Type, + for<'r> &'r Pool: Executor<'r, Database = DB>, + for<'r> >::Arguments: IntoArguments<'r, DB>, + ::QueryResult: Into, +{ + async fn fetch_task_type( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let task_type = params.task_type.unwrap(); + + let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + let task: Task = sqlx::query_as(query) + .bind(task_type) + .bind(now_str) + .fetch_one(pool) + .await?; + + Ok(task) + } -#[allow(dead_code)] -async fn general_any_impl_update_task_state( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + async fn find_task_by_uniq_hash( + query: &str, + pool: &Pool, + params: 
&QueryParams<'_>, + ) -> Option { + let metadata = params.metadata.unwrap(); - let state_str: &str = params.state.unwrap().into(); + let uniq_hash = calculate_hash(&metadata.to_string()); - let uuid = params.uuid.unwrap(); + sqlx::query_as(query) + .bind(uniq_hash) + .fetch_one(pool) + .await + .ok() + } - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + async fn find_task_by_id( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .uuid + .unwrap() + .as_hyphenated() + .encode_lower(&mut buffer); + + let task: Task = sqlx::query_as(query) + .bind(&*uuid_as_text) + .fetch_one(pool) + .await?; + + Ok(task) + } - let task: Task = sqlx::query_as(query) - .bind(state_str) - .bind(updated_at_str) - .bind(&*uuid_as_text) - .fetch_one(pool) - .await?; + async fn retry_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let now = Utc::now(); + let now_str = format!("{}", now.format("%F %T%.f+00")); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let retries = params.task.unwrap().retries + 1; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .task + .unwrap() + .id + .as_hyphenated() + .encode_lower(&mut buffer); + + let error = params.error_message.unwrap(); + + let failed_task: Task = sqlx::query_as(query) + .bind(error) + .bind(retries) + .bind(scheduled_at_str) + .bind(now_str) + .bind(&*uuid_as_text) + .fetch_one(pool) + .await?; + + Ok(failed_task) + } - Ok(task) -} + async fn insert_task_uniq( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let metadata = 
params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let task: Task = sqlx::query_as(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at_str) + .fetch_one(pool) + .await?; + Ok(task) + } -#[allow(dead_code)] -async fn general_any_impl_fail_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + async fn insert_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let metadata_str = params.metadata.unwrap().to_string(); + let task_type = params.task_type.unwrap(); + + let task: Task = sqlx::query_as(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(scheduled_at_str) + .fetch_one(pool) + .await?; + + Ok(task) + } - let id = params.task.unwrap().id; + async fn update_task_state( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + let state_str: &str = params.state.unwrap().into(); - let error_message = params.error_message.unwrap(); + let uuid = params.uuid.unwrap(); - let failed_task: Task = sqlx::query_as(query) - .bind(<&str>::from(FangTaskState::Failed)) - .bind(error_message) - .bind(updated_at) - .bind(&*uuid_as_text) - .fetch_one(pool) - .await?; + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = 
uuid.as_hyphenated().encode_lower(&mut buffer); - Ok(failed_task) -} + let task: Task = sqlx::query_as(query) + .bind(state_str) + .bind(updated_at_str) + .bind(&*uuid_as_text) + .fetch_one(pool) + .await?; -#[allow(dead_code)] -async fn general_any_impl_remove_all_task( - query: &str, - pool: &Pool, -) -> Result { - Ok(sqlx::query(query).execute(pool).await?.rows_affected()) -} + Ok(task) + } -#[allow(dead_code)] -async fn general_any_impl_remove_all_scheduled_tasks( - query: &str, - pool: &Pool, -) -> Result { - let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - Ok(sqlx::query(query) - .bind(now_str) - .execute(pool) - .await? - .rows_affected()) -} + async fn fail_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); -#[allow(dead_code)] -async fn general_any_impl_remove_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .uuid - .unwrap() - .as_hyphenated() - .encode_lower(&mut buffer); - - let result = sqlx::query(query) - .bind(&*uuid_as_text) - .execute(pool) - .await? 
- .rows_affected(); - - if result != 1 { - Err(AsyncQueueError::ResultError { - expected: 1, - found: result, - }) - } else { - Ok(result) + let id = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + + let error_message = params.error_message.unwrap(); + + let failed_task: Task = sqlx::query_as(query) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(&*uuid_as_text) + .fetch_one(pool) + .await?; + + Ok(failed_task) } -} -#[allow(dead_code)] -async fn general_any_impl_remove_task_by_metadata( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let metadata = serde_json::to_value(params.runnable.unwrap())?; - - let uniq_hash = calculate_hash(&metadata.to_string()); - - Ok(sqlx::query(query) - .bind(uniq_hash) - .execute(pool) - .await? - .rows_affected()) -} + async fn remove_all_task(query: &str, pool: &Pool) -> Result { + // This converts QueryResult to AnyQueryResult and then to u64 + // do not delete into() method and do not delete Into trait bound + Ok(sqlx::query(query) + .execute(pool) + .await? + .into() + .rows_affected()) + } -#[allow(dead_code)] -async fn general_any_impl_remove_task_type( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let task_type = params.task_type.unwrap(); - - Ok(sqlx::query(query) - .bind(task_type) - .execute(pool) - .await? - .rows_affected()) -} + async fn remove_all_scheduled_tasks( + query: &str, + pool: &Pool, + ) -> Result { + let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + // This converts QueryResult to AnyQueryResult and then to u64 + // do not delete into() method and do not delete Into trait bound + + Ok(sqlx::query(query) + .bind(now_str) + .execute(pool) + .await? 
+ .into() + .rows_affected()) + } -#[allow(dead_code)] -async fn general_any_impl_fetch_task_type( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let task_type = params.task_type.unwrap(); - - let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - let task: Task = sqlx::query_as(query) - .bind(task_type) - .bind(now_str) - .fetch_one(pool) - .await?; - - Ok(task) -} + async fn remove_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .uuid + .unwrap() + .as_hyphenated() + .encode_lower(&mut buffer); + + let result = sqlx::query(query) + .bind(&*uuid_as_text) + .execute(pool) + .await? + .into() + .rows_affected(); + + if result != 1 { + Err(AsyncQueueError::ResultError { + expected: 1, + found: result, + }) + } else { + Ok(result) + } + } -#[allow(dead_code)] -async fn general_any_impl_find_task_by_uniq_hash( - query: &str, - pool: &Pool, - params: &QueryParams<'_>, -) -> Option { - let metadata = params.metadata.unwrap(); - - let uniq_hash = calculate_hash(&metadata.to_string()); - - sqlx::query_as(query) - .bind(uniq_hash) - .fetch_one(pool) - .await - .ok() -} + async fn remove_task_by_metadata( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let metadata = serde_json::to_value(params.runnable.unwrap())?; + + let uniq_hash = calculate_hash(&metadata.to_string()); + + Ok(sqlx::query(query) + .bind(uniq_hash) + .execute(pool) + .await? 
+ .into() + .rows_affected()) + } -#[allow(dead_code)] -async fn general_any_impl_find_task_by_id( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .uuid - .unwrap() - .as_hyphenated() - .encode_lower(&mut buffer); - - let task: Task = sqlx::query_as(query) - .bind(&*uuid_as_text) - .fetch_one(pool) - .await?; - - Ok(task) -} + async fn remove_task_type( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let task_type = params.task_type.unwrap(); + + Ok(sqlx::query(query) + .bind(task_type) + .execute(pool) + .await? + .into() + .rows_affected()) + } -#[allow(dead_code)] -async fn general_any_impl_retry_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let now = Utc::now(); - let now_str = format!("{}", now.format("%F %T%.f+00")); - - let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); - let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); - let retries = params.task.unwrap().retries + 1; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .task - .unwrap() - .id - .as_hyphenated() - .encode_lower(&mut buffer); - - let error = params.error_message.unwrap(); - - let failed_task: Task = sqlx::query_as(query) - .bind(error) - .bind(retries) - .bind(scheduled_at_str) - .bind(now_str) - .bind(&*uuid_as_text) - .fetch_one(pool) - .await?; - - Ok(failed_task) + async fn insert_task_if_not_exists( + queries: (&str, &str), + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match Self::find_task_by_uniq_hash(queries.0, pool, ¶ms).await { + Some(task) => Ok(task), + None => Self::insert_task_uniq(queries.1, pool, params).await, + } + } } diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs index 6c49815e..f7c6044a 100644 --- a/fang/src/asynk/backend_sqlx/mysql.rs +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ 
-15,61 +15,399 @@ const FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL: &str = const FIND_TASK_BY_ID_QUERY_MYSQL: &str = include_str!("../queries_mysql/find_task_by_id.sql"); const RETRY_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/retry_task.sql"); -use super::general_any_impl_fetch_task_type; -use super::general_any_impl_find_task_by_id; -use super::general_any_impl_find_task_by_uniq_hash; -use super::general_any_impl_remove_all_scheduled_tasks; -use super::general_any_impl_remove_all_task; -use super::general_any_impl_remove_task; -use super::general_any_impl_remove_task_by_metadata; -use super::general_any_impl_remove_task_type; -use super::{calculate_hash, QueryParams, Res, SqlXQuery}; -use crate::{AsyncQueueError, FangTaskState, Task}; -use SqlXQuery as Q; - use chrono::Duration; -use chrono::Utc; -use sqlx::{Any, Pool}; +use chrono::{DateTime, Utc}; +use sqlx::mysql::MySqlQueryResult; +use sqlx::mysql::MySqlRow; +use sqlx::FromRow; +use sqlx::MySql; +use sqlx::Pool; +use sqlx::Row; use uuid::Uuid; +use SqlXQuery as Q; + +use super::FangQueryable; +use super::{calculate_hash, QueryParams, Res, SqlXQuery}; +use crate::{AsyncQueueError, FangTaskState, Task}; #[derive(Debug, Clone)] -pub(crate) struct BackendSqlXMySQL {} +pub(super) struct BackendSqlXMySQL {} + +impl<'a> FromRow<'a, MySqlRow> for Task { + fn from_row(row: &'a MySqlRow) -> Result { + let uuid_as_text: &str = row.get("id"); + + let id = Uuid::parse_str(uuid_as_text).unwrap(); + + let raw: &str = row.get("metadata"); // will work if database cast json to string + let raw = raw.replace('\\', ""); + + // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting + let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let error_message: Option = row.get("error_message"); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let 
state: FangTaskState = state_str.into(); + + let task_type: String = row.get("task_type"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let uniq_hash: Option = row.get("uniq_hash"); + + let retries: i32 = row.get("retries"); + + let scheduled_at_str: &str = row.get("scheduled_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let created_at_str: &str = row.get("created_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let updated_at_str: &str = row.get("updated_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + .created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + +impl FangQueryable for BackendSqlXMySQL { + async fn insert_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let metadata_str = params.metadata.unwrap().to_string(); + let task_type = params.task_type.unwrap(); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(scheduled_at_str) + .execute(pool) + 
.await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(task) + } + + async fn update_task_state( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + + let state_str: &str = params.state.unwrap().into(); + + let uuid = params.uuid.unwrap(); + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(state_str) + .bind(updated_at_str) + .bind(&*uuid_as_text) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); + + let task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(task) + } + + async fn insert_task_uniq( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let uuid = Uuid::new_v4(); + let mut buffer = Uuid::encode_buffer(); + let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); + + let metadata = params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(uuid_as_str) + .bind(metadata_str) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at_str) + .execute(pool) + .await?, + ) + .rows_affected(); + + if 
affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(task) + } + + async fn fail_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + + let id = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + + let error_message = params.error_message.unwrap(); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(&*uuid_as_text) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&id).build(); + + let failed_task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(failed_task) + } + + async fn retry_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let now = Utc::now(); + let now_str = format!("{}", now.format("%F %T%.f+00")); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); + let retries = params.task.unwrap().retries + 1; + + let uuid = params.task.unwrap().id; + + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); + + let error = params.error_message.unwrap(); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(error) + .bind(retries) + .bind(scheduled_at_str) + .bind(now_str) + .bind(&*uuid_as_text) + .execute(pool) + 
.await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let failed_task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(failed_task) + } + + async fn insert_task_if_not_exists( + queries: (&str, &str), + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match >::find_task_by_uniq_hash( + queries.0, pool, ¶ms, + ) + .await + { + Some(task) => Ok(task), + None => { + >::insert_task_uniq( + queries.1, pool, params, + ) + .await + } + } + } + + async fn find_task_by_id( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let mut buffer = Uuid::encode_buffer(); + let uuid_as_text = params + .uuid + .unwrap() + .as_hyphenated() + .encode_lower(&mut buffer); + + let task: Task = sqlx::query_as(query) + .bind(&*uuid_as_text) + .fetch_one(pool) + .await?; + + Ok(task) + } +} impl BackendSqlXMySQL { pub(super) async fn execute_query( query: SqlXQuery, - pool: &Pool, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match query { Q::InsertTask => { - let task = mysql_impl_insert_task(INSERT_TASK_QUERY_MYSQL, pool, params).await?; + let task = >::insert_task( + INSERT_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::UpdateTaskState => { - let task = - mysql_impl_update_task_state(UPDATE_TASK_STATE_QUERY_MYSQL, pool, params) - .await?; + let task = >::update_task_state( + UPDATE_TASK_STATE_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::FailTask => { - let task = mysql_impl_fail_task(FAIL_TASK_QUERY_MYSQL, pool, params).await?; + let task = >::fail_task( + FAIL_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::RemoveAllTask => { - let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_MYSQL, pool).await?; - - 
Ok(Res::Bigint(affected_rows)) - } - - Q::RemoveAllScheduledTask => { - let affected_rows = general_any_impl_remove_all_scheduled_tasks( - REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, + let affected_rows = >::remove_all_task( + REMOVE_ALL_TASK_QUERY_MYSQL, pool, ) .await?; @@ -77,15 +415,20 @@ impl BackendSqlXMySQL { Ok(Res::Bigint(affected_rows)) } - Q::RemoveTask => { + Q::RemoveAllScheduledTask => { let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_MYSQL, pool, params).await?; + >::remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, + pool, + ) + .await?; Ok(Res::Bigint(affected_rows)) } - Q::RemoveTaskByMetadata => { - let affected_rows = general_any_impl_remove_task_by_metadata( - REMOVE_TASK_BY_METADATA_QUERY_MYSQL, + + Q::RemoveTask => { + let affected_rows = >::remove_task( + REMOVE_TASK_QUERY_MYSQL, pool, params, ) @@ -93,33 +436,58 @@ impl BackendSqlXMySQL { Ok(Res::Bigint(affected_rows)) } - Q::RemoveTaskType => { + Q::RemoveTaskByMetadata => { let affected_rows = - general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_MYSQL, pool, params) - .await?; + >::remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = >::remove_task_type( + REMOVE_TASKS_TYPE_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { - let task = - general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_MYSQL, pool, params) - .await?; + let task = >::fetch_task_type( + FETCH_TASK_TYPE_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::FindTaskById => { - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, params) - .await?; + let task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::RetryTask => { - let task = mysql_impl_retry_task(RETRY_TASK_QUERY_MYSQL, pool, 
params).await?; + let task = >::retry_task( + RETRY_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::InsertTaskIfNotExists => { - let task = mysql_any_impl_insert_task_if_not_exists( + let task = >::insert_task_if_not_exists( ( FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, INSERT_TASK_UNIQ_QUERY_MYSQL, @@ -138,214 +506,3 @@ impl BackendSqlXMySQL { "MySQL" } } - -async fn mysql_impl_insert_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); - - let metadata_str = params.metadata.unwrap().to_string(); - let task_type = params.task_type.unwrap(); - - let affected_rows = sqlx::query(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(scheduled_at_str) - .execute(pool) - .await? - .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&uuid).build(); - - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(task) -} - -async fn mysql_impl_insert_task_uniq( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - - let metadata = params.metadata.unwrap(); - - let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); - - let task_type = params.task_type.unwrap(); - - let uniq_hash = calculate_hash(&metadata_str); - - let affected_rows = sqlx::query(query) - .bind(uuid_as_str) - .bind(metadata_str) - .bind(task_type) - .bind(uniq_hash) - 
.bind(scheduled_at_str) - .execute(pool) - .await? - .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&uuid).build(); - - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(task) -} - -async fn mysql_impl_update_task_state( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); - - let state_str: &str = params.state.unwrap().into(); - - let uuid = params.uuid.unwrap(); - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - - let affected_rows = sqlx::query(query) - .bind(state_str) - .bind(updated_at_str) - .bind(&*uuid_as_text) - .execute(pool) - .await? - .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); - - let task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(task) -} - -async fn mysql_impl_fail_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); - - let id = params.task.unwrap().id; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); - - let error_message = params.error_message.unwrap(); - - let affected_rows = sqlx::query(query) - .bind(<&str>::from(FangTaskState::Failed)) - .bind(error_message) - .bind(updated_at) - .bind(&*uuid_as_text) - .execute(pool) - .await? 
- .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&id).build(); - - let failed_task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(failed_task) -} - -async fn mysql_impl_retry_task( - query: &str, - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - let now = Utc::now(); - let now_str = format!("{}", now.format("%F %T%.f+00")); - - let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); - let scheduled_at_str = format!("{}", scheduled_at.format("%F %T%.f+00")); - let retries = params.task.unwrap().retries + 1; - - let uuid = params.task.unwrap().id; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - - let error = params.error_message.unwrap(); - - let affected_rows = sqlx::query(query) - .bind(error) - .bind(retries) - .bind(scheduled_at_str) - .bind(now_str) - .bind(&*uuid_as_text) - .execute(pool) - .await? 
- .rows_affected(); - - if affected_rows != 1 { - return Err(AsyncQueueError::ResultError { - expected: 1, - found: affected_rows, - }); - } - - let query_params = QueryParams::builder().uuid(&uuid).build(); - - let failed_task: Task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_MYSQL, pool, query_params).await?; - - Ok(failed_task) -} - -async fn mysql_any_impl_insert_task_if_not_exists( - queries: (&str, &str), - pool: &Pool, - params: QueryParams<'_>, -) -> Result { - match general_any_impl_find_task_by_uniq_hash(queries.0, pool, ¶ms).await { - Some(task) => Ok(task), - None => mysql_impl_insert_task_uniq(queries.1, pool, params).await, - } -} diff --git a/fang/src/asynk/backend_sqlx/postgres.rs b/fang/src/asynk/backend_sqlx/postgres.rs index a5185a1a..5afe304a 100644 --- a/fang/src/asynk/backend_sqlx/postgres.rs +++ b/fang/src/asynk/backend_sqlx/postgres.rs @@ -24,40 +24,105 @@ const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/retry_ #[derive(Debug, Clone)] pub(super) struct BackendSqlXPg {} +use chrono::DateTime; +use chrono::Utc; +use sqlx::postgres::PgRow; +use sqlx::FromRow; +use sqlx::Pool; +use sqlx::Postgres; +use sqlx::Row; +use uuid::Uuid; use SqlXQuery as Q; +use super::FangQueryable; +use super::{QueryParams, Res, SqlXQuery}; use crate::AsyncQueueError; +use crate::FangTaskState; +use crate::Task; -use super::general_any_impl_fail_task; -use super::general_any_impl_fetch_task_type; -use super::general_any_impl_find_task_by_id; -use super::general_any_impl_insert_task; -use super::general_any_impl_insert_task_if_not_exists; -use super::general_any_impl_remove_all_scheduled_tasks; -use super::general_any_impl_remove_all_task; -use super::general_any_impl_remove_task; -use super::general_any_impl_remove_task_by_metadata; -use super::general_any_impl_remove_task_type; -use super::general_any_impl_retry_task; -use super::general_any_impl_update_task_state; -use super::{QueryParams, Res, SqlXQuery}; -use sqlx::{Any, 
Pool}; +impl<'a> FromRow<'a, PgRow> for Task { + fn from_row(row: &'a PgRow) -> Result { + let uuid_as_text: &str = row.get("id"); + + let id = Uuid::parse_str(uuid_as_text).unwrap(); + + let raw: &str = row.get("metadata"); // will work if database cast json to string + let raw = raw.replace('\\', ""); + + // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting + let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let error_message: Option = row.get("error_message"); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let state: FangTaskState = state_str.into(); + + let task_type: String = row.get("task_type"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let uniq_hash: Option = row.get("uniq_hash"); + + let retries: i32 = row.get("retries"); + + let scheduled_at_str: &str = row.get("scheduled_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let created_at_str: &str = row.get("created_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let updated_at_str: &str = row.get("updated_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + 
.created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + +impl FangQueryable for BackendSqlXPg {} impl BackendSqlXPg { pub(super) async fn execute_query( query: SqlXQuery, - pool: &Pool, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match query { Q::InsertTask => { - let task = - general_any_impl_insert_task(INSERT_TASK_QUERY_POSTGRES, pool, params).await?; + let task = >::insert_task( + INSERT_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::UpdateTaskState => { - let task = general_any_impl_update_task_state( + let task = >::update_task_state( UPDATE_TASK_STATE_QUERY_POSTGRES, pool, params, @@ -66,35 +131,37 @@ impl BackendSqlXPg { Ok(Res::Task(task)) } Q::FailTask => { - let task = - general_any_impl_fail_task(FAIL_TASK_QUERY_POSTGRES, pool, params).await?; + let task = >::fail_task( + FAIL_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::RemoveAllTask => { - let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_POSTGRES, pool).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveAllScheduledTask => { - let affected_rows = general_any_impl_remove_all_scheduled_tasks( - REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + let affected_rows = >::remove_all_task( + REMOVE_ALL_TASK_QUERY_POSTGRES, pool, ) .await?; Ok(Res::Bigint(affected_rows)) } - Q::RemoveTask => { + Q::RemoveAllScheduledTask => { let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_POSTGRES, pool, params).await?; + >::remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + pool, + ) + .await?; Ok(Res::Bigint(affected_rows)) } - Q::RemoveTaskByMetadata => { - let affected_rows = general_any_impl_remove_task_by_metadata( - REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + Q::RemoveTask => { + let affected_rows = >::remove_task( + REMOVE_TASK_QUERY_POSTGRES, pool, params, ) @@ -102,8 +169,19 @@ impl BackendSqlXPg { Ok(Res::Bigint(affected_rows)) } + Q::RemoveTaskByMetadata 
=> { + let affected_rows = + >::remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } Q::RemoveTaskType => { - let affected_rows = general_any_impl_remove_task_type( + let affected_rows = >::remove_task_type( REMOVE_TASKS_TYPE_QUERY_POSTGRES, pool, params, @@ -113,25 +191,35 @@ impl BackendSqlXPg { Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { - let task = - general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_POSTGRES, pool, params) - .await?; + let task = >::fetch_task_type( + FETCH_TASK_TYPE_QUERY_POSTGRES, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::FindTaskById => { - let task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_POSTGRES, pool, params) - .await?; + let task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_POSTGRES, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::RetryTask => { - let task = - general_any_impl_retry_task(RETRY_TASK_QUERY_POSTGRES, pool, params).await?; + let task = >::retry_task( + RETRY_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::InsertTaskIfNotExists => { - let task = general_any_impl_insert_task_if_not_exists( + let task = >::insert_task_if_not_exists( ( FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, INSERT_TASK_UNIQ_QUERY_POSTGRES, diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs index af37a341..430c9605 100644 --- a/fang/src/asynk/backend_sqlx/sqlite.rs +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -20,39 +20,103 @@ const RETRY_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/retry_task #[derive(Debug, Clone)] pub(super) struct BackendSqlXSQLite {} +use super::FangQueryable; use super::{QueryParams, Res, SqlXQuery}; use crate::AsyncQueueError; -use sqlx::{Any, Pool}; +use crate::FangTaskState; +use crate::Task; +use chrono::{DateTime, Utc}; +use sqlx::sqlite::SqliteRow; +use sqlx::FromRow; +use sqlx::Pool; +use sqlx::Row; 
+use sqlx::Sqlite; +use uuid::Uuid; use SqlXQuery as Q; -use super::general_any_impl_fail_task; -use super::general_any_impl_fetch_task_type; -use super::general_any_impl_find_task_by_id; -use super::general_any_impl_insert_task; -use super::general_any_impl_insert_task_if_not_exists; -use super::general_any_impl_remove_all_scheduled_tasks; -use super::general_any_impl_remove_all_task; -use super::general_any_impl_remove_task; -use super::general_any_impl_remove_task_by_metadata; -use super::general_any_impl_remove_task_type; -use super::general_any_impl_retry_task; -use super::general_any_impl_update_task_state; +impl<'a> FromRow<'a, SqliteRow> for Task { + fn from_row(row: &'a SqliteRow) -> Result { + let uuid_as_text: &str = row.get("id"); + + let id = Uuid::parse_str(uuid_as_text).unwrap(); + + let raw: &str = row.get("metadata"); // will work if database cast json to string + let raw = raw.replace('\\', ""); + + // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting + let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let error_message: Option = row.get("error_message"); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let state: FangTaskState = state_str.into(); + + let task_type: String = row.get("task_type"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let uniq_hash: Option = row.get("uniq_hash"); + + let retries: i32 = row.get("retries"); + + let scheduled_at_str: &str = row.get("scheduled_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let created_at_str: &str = row.get("created_at"); + + // This unwrap is safe because we know that the database 
returns the date in the correct format + let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + let updated_at_str: &str = row.get("updated_at"); + + // This unwrap is safe because we know that the database returns the date in the correct format + let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") + .unwrap() + .into(); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + .created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + +impl FangQueryable for BackendSqlXSQLite {} impl BackendSqlXSQLite { pub(super) async fn execute_query( query: SqlXQuery, - pool: &Pool, + pool: &Pool, params: QueryParams<'_>, ) -> Result { match query { Q::InsertTask => { - let task = - general_any_impl_insert_task(INSERT_TASK_QUERY_SQLITE, pool, params).await?; + let task = >::insert_task( + INSERT_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::UpdateTaskState => { - let task = general_any_impl_update_task_state( + let task = >::update_task_state( UPDATE_TASK_STATE_QUERY_SQLITE, pool, params, @@ -61,34 +125,37 @@ impl BackendSqlXSQLite { Ok(Res::Task(task)) } Q::FailTask => { - let task = general_any_impl_fail_task(FAIL_TASK_QUERY_SQLITE, pool, params).await?; + let task = >::fail_task( + FAIL_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::RemoveAllTask => { - let affected_rows = - general_any_impl_remove_all_task(REMOVE_ALL_TASK_QUERY_SQLITE, pool).await?; - - Ok(Res::Bigint(affected_rows)) - } - Q::RemoveAllScheduledTask => { - let affected_rows = general_any_impl_remove_all_scheduled_tasks( - REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + let affected_rows = >::remove_all_task( + REMOVE_ALL_TASK_QUERY_SQLITE, pool, ) .await?; Ok(Res::Bigint(affected_rows)) } - Q::RemoveTask => { + 
Q::RemoveAllScheduledTask => { let affected_rows = - general_any_impl_remove_task(REMOVE_TASK_QUERY_SQLITE, pool, params).await?; + >::remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + pool, + ) + .await?; Ok(Res::Bigint(affected_rows)) } - Q::RemoveTaskByMetadata => { - let affected_rows = general_any_impl_remove_task_by_metadata( - REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + Q::RemoveTask => { + let affected_rows = >::remove_task( + REMOVE_TASK_QUERY_SQLITE, pool, params, ) @@ -96,33 +163,57 @@ impl BackendSqlXSQLite { Ok(Res::Bigint(affected_rows)) } - Q::RemoveTaskType => { + Q::RemoveTaskByMetadata => { let affected_rows = - general_any_impl_remove_task_type(REMOVE_TASKS_TYPE_QUERY_SQLITE, pool, params) - .await?; + >::remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = >::remove_task_type( + REMOVE_TASKS_TYPE_QUERY_SQLITE, + pool, + params, + ) + .await?; Ok(Res::Bigint(affected_rows)) } Q::FetchTaskType => { - let task = - general_any_impl_fetch_task_type(FETCH_TASK_TYPE_QUERY_SQLITE, pool, params) - .await?; + let task = >::fetch_task_type( + FETCH_TASK_TYPE_QUERY_SQLITE, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::FindTaskById => { - let task = - general_any_impl_find_task_by_id(FIND_TASK_BY_ID_QUERY_SQLITE, pool, params) - .await?; + let task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_SQLITE, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::RetryTask => { - let task = - general_any_impl_retry_task(RETRY_TASK_QUERY_SQLITE, pool, params).await?; + let task = >::retry_task( + RETRY_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; Ok(Res::Task(task)) } Q::InsertTaskIfNotExists => { - let task = general_any_impl_insert_task_if_not_exists( + let task = >::insert_task_if_not_exists( ( FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, INSERT_TASK_UNIQ_QUERY_SQLITE, From 
890531a7984a7d731732003a8bbbbaca7aa338f0 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Mon, 15 Apr 2024 15:02:53 +0200 Subject: [PATCH 71/90] relocate comment --- fang/src/asynk/backend_sqlx.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 10e4dae4..b61e8351 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -141,9 +141,6 @@ pub(crate) enum SqlXQuery { InsertTaskIfNotExists, } -// Unwraps by QueryParams are safe because the responsibility is of the caller -// and the caller is the library itself - use crate::AsyncQueueError; use crate::AsyncRunnable; use crate::FangTaskState; @@ -174,6 +171,8 @@ where pool: &Pool, params: QueryParams<'_>, ) -> Result { + // Unwraps by QueryParams are safe because the responsibility is of the caller + // and the caller is the library itself let task_type = params.task_type.unwrap(); let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); From fe69c669e033d48484c141b771d2a2315921c15c Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Mon, 15 Apr 2024 15:32:39 +0200 Subject: [PATCH 72/90] remove `backend` field from `AsyncQueue` --- fang/src/asynk/async_queue.rs | 58 +++++++++++++++++++--------------- fang/src/asynk/backend_sqlx.rs | 2 -- 2 files changed, 33 insertions(+), 27 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 013670aa..942109b2 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -191,6 +191,17 @@ impl InternalPool { _ => panic!("Not a SqlitePool!"), } } + + pub(crate) fn backend(&self) -> BackendSqlX { + match *self { + #[cfg(feature = "asynk-postgres")] + InternalPool::Pg(_) => BackendSqlX::Pg, + #[cfg(feature = "asynk-mysql")] + InternalPool::MySql(_) => BackendSqlX::MySql, + #[cfg(feature = "asynk-sqlite")] + InternalPool::Sqlite(_) => BackendSqlX::Sqlite, + } + } } #[derive(TypedBuilder, Debug, Clone)] @@ 
-203,8 +214,6 @@ pub struct AsyncQueue { max_pool_size: u32, #[builder(default = false, setter(skip))] connected: bool, - #[builder(default = BackendSqlX::NoBackend, setter(skip))] - backend: BackendSqlX, } #[cfg(test)] @@ -287,7 +296,6 @@ impl AsyncQueue { let (backend, pool) = get_backend(kind, &self.uri, self.max_pool_size).await?; self.pool = Some(pool); - self.backend = backend; self.connected = true; Ok(()) } @@ -420,8 +428,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().uuid(id).build(); - let task = self - .backend + let task = pool + .backend() .execute_query(SqlXQuery::FindTaskById, pool, query_params) .await? .unwrap_task(); @@ -437,7 +445,7 @@ impl AsyncQueueable for AsyncQueue { // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); - let task = Self::fetch_and_touch_task_query(pool, &self.backend, task_type).await?; + let task = Self::fetch_and_touch_task_query(pool, &pool.backend(), task_type).await?; Ok(task) } @@ -451,7 +459,7 @@ impl AsyncQueueable for AsyncQueue { let task = if !task.uniq() { Self::insert_task_query( pool, - &self.backend, + &pool.backend(), &metadata, &task.task_type(), &Utc::now(), @@ -460,7 +468,7 @@ impl AsyncQueueable for AsyncQueue { } else { Self::insert_task_if_not_exist_query( pool, - &self.backend, + &pool.backend(), &metadata, &task.task_type(), &Utc::now(), @@ -476,7 +484,7 @@ impl AsyncQueueable for AsyncQueue { // this unwrap is safe because we check if connection is established let pool = self.pool.as_ref().unwrap(); - let task = Self::schedule_task_query(pool, &self.backend, task).await?; + let task = Self::schedule_task_query(pool, &pool.backend(), task).await?; Ok(task) } @@ -488,8 +496,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().build(); - let result = self - .backend + let result = pool + .backend() .execute_query(SqlXQuery::RemoveAllTask, pool, query_params) .await? 
.unwrap_u64(); @@ -504,8 +512,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().build(); - let result = self - .backend + let result = pool + .backend() .execute_query(SqlXQuery::RemoveAllScheduledTask, pool, query_params) .await? .unwrap_u64(); @@ -519,8 +527,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().uuid(id).build(); - let result = self - .backend + let result = pool + .backend() .execute_query(SqlXQuery::RemoveTask, pool, query_params) .await? .unwrap_u64(); @@ -538,8 +546,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().runnable(task).build(); - let result = self - .backend + let result = pool + .backend() .execute_query(SqlXQuery::RemoveTaskByMetadata, pool, query_params) .await? .unwrap_u64(); @@ -556,8 +564,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().task_type(task_type).build(); - let result = self - .backend + let result = pool + .backend() .execute_query(SqlXQuery::RemoveTaskType, pool, query_params) .await? .unwrap_u64(); @@ -575,8 +583,8 @@ impl AsyncQueueable for AsyncQueue { let query_params = QueryParams::builder().uuid(&task.id).state(state).build(); - let task = self - .backend + let task = pool + .backend() .execute_query(SqlXQuery::UpdateTaskState, pool, query_params) .await? .unwrap_task(); @@ -597,8 +605,8 @@ impl AsyncQueueable for AsyncQueue { .task(task) .build(); - let failed_task = self - .backend + let failed_task = pool + .backend() .execute_query(SqlXQuery::FailTask, pool, query_params) .await? .unwrap_task(); @@ -622,8 +630,8 @@ impl AsyncQueueable for AsyncQueue { .task(task) .build(); - let failed_task = self - .backend + let failed_task = pool + .backend() .execute_query(SqlXQuery::RetryTask, pool, query_params) .await? 
.unwrap_task(); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index b61e8351..b1e2598e 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -38,8 +38,6 @@ pub(crate) enum BackendSqlX { #[cfg(feature = "asynk-mysql")] MySql, - - NoBackend, } #[allow(dead_code)] From 201bc3b92d495cf80d3b63395b6cb19f1aa7322f Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Mon, 15 Apr 2024 15:41:43 +0200 Subject: [PATCH 73/90] fix clippy --- fang/src/asynk/async_queue.rs | 12 ++++++------ fang/src/asynk/backend_sqlx.rs | 6 ++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 942109b2..49303213 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -239,11 +239,11 @@ use std::env; use super::backend_sqlx::BackendSqlX; -async fn get_backend( +async fn get_pool( kind: AnyKind, _uri: &str, _max_connections: u32, -) -> Result<(BackendSqlX, InternalPool), AsyncQueueError> { +) -> Result { match kind { #[cfg(feature = "asynk-postgres")] AnyKind::Postgres => { @@ -252,7 +252,7 @@ async fn get_backend( .connect(_uri) .await?; - Ok((BackendSqlX::Pg, InternalPool::Pg(pool))) + Ok(InternalPool::Pg(pool)) } #[cfg(feature = "asynk-mysql")] AnyKind::MySql => { @@ -261,7 +261,7 @@ async fn get_backend( .connect(_uri) .await?; - Ok((BackendSqlX::MySql, InternalPool::MySql(pool))) + Ok(InternalPool::MySql(pool)) } #[cfg(feature = "asynk-sqlite")] AnyKind::Sqlite => { @@ -270,7 +270,7 @@ async fn get_backend( .connect(_uri) .await?; - Ok((BackendSqlX::Sqlite, InternalPool::Sqlite(pool))) + Ok(InternalPool::Sqlite(pool)) } #[allow(unreachable_patterns)] _ => panic!("Not a valid backend"), @@ -293,7 +293,7 @@ impl AsyncQueue { let kind: AnyKind = self.uri.parse::()?.kind(); - let (backend, pool) = get_backend(kind, &self.uri, self.max_pool_size).await?; + let pool = get_pool(kind, &self.uri, self.max_pool_size).await?; self.pool = 
Some(pool); self.connected = true; diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index b1e2598e..8f9079f6 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -92,7 +92,7 @@ impl BackendSqlX { _pool: &InternalPool, _params: QueryParams<'_>, ) -> Result { - match self { + match *self { #[cfg(feature = "asynk-postgres")] BackendSqlX::Pg => { BackendSqlXPg::execute_query(_query, _pool.unwrap_pg_pool(), _params).await @@ -105,20 +105,18 @@ impl BackendSqlX { BackendSqlX::MySql => { BackendSqlXMySQL::execute_query(_query, _pool.unwrap_mysql_pool(), _params).await } - _ => unreachable!(), } } // I think it is useful to have this method, although it is not used pub(crate) fn _name(&self) -> &str { - match self { + match *self { #[cfg(feature = "asynk-postgres")] BackendSqlX::Pg => BackendSqlXPg::_name(), #[cfg(feature = "asynk-sqlite")] BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), #[cfg(feature = "asynk-mysql")] BackendSqlX::MySql => BackendSqlXMySQL::_name(), - _ => unreachable!(), } } } From 9f540b770c2937d2cb6b45f69447006e40730549 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Mon, 15 Apr 2024 15:57:01 +0200 Subject: [PATCH 74/90] worst clippy fix of all time --- fang/src/asynk/backend_sqlx.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 8f9079f6..144e871d 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -38,6 +38,10 @@ pub(crate) enum BackendSqlX { #[cfg(feature = "asynk-mysql")] MySql, + + #[cfg(not(any(feature = "asynk-postgres", feature = "asynk-sqlite", feature = "asynk-mysql")))] + #[allow(dead_code)] + Dummy, } #[allow(dead_code)] @@ -105,6 +109,8 @@ impl BackendSqlX { BackendSqlX::MySql => { BackendSqlXMySQL::execute_query(_query, _pool.unwrap_mysql_pool(), _params).await } + #[cfg(not(any(feature = "asynk-postgres", feature = "asynk-sqlite", feature = "asynk-mysql")))] + 
BackendSqlX::Dummy => unreachable!(), } } @@ -117,6 +123,8 @@ impl BackendSqlX { BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), #[cfg(feature = "asynk-mysql")] BackendSqlX::MySql => BackendSqlXMySQL::_name(), + #[cfg(not(any(feature = "asynk-postgres", feature = "asynk-sqlite", feature = "asynk-mysql")))] + BackendSqlX::Dummy => unreachable!(), } } } From 0dd461f03365b2e8c5719cb541f8992d1b763266 Mon Sep 17 00:00:00 2001 From: Dopplerian Date: Mon, 15 Apr 2024 16:00:00 +0200 Subject: [PATCH 75/90] fmt fix --- fang/src/asynk/backend_sqlx.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 144e871d..b8be9bbe 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -39,7 +39,11 @@ pub(crate) enum BackendSqlX { #[cfg(feature = "asynk-mysql")] MySql, - #[cfg(not(any(feature = "asynk-postgres", feature = "asynk-sqlite", feature = "asynk-mysql")))] + #[cfg(not(any( + feature = "asynk-postgres", + feature = "asynk-sqlite", + feature = "asynk-mysql" + )))] #[allow(dead_code)] Dummy, } @@ -109,7 +113,11 @@ impl BackendSqlX { BackendSqlX::MySql => { BackendSqlXMySQL::execute_query(_query, _pool.unwrap_mysql_pool(), _params).await } - #[cfg(not(any(feature = "asynk-postgres", feature = "asynk-sqlite", feature = "asynk-mysql")))] + #[cfg(not(any( + feature = "asynk-postgres", + feature = "asynk-sqlite", + feature = "asynk-mysql" + )))] BackendSqlX::Dummy => unreachable!(), } } @@ -123,7 +131,11 @@ impl BackendSqlX { BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), #[cfg(feature = "asynk-mysql")] BackendSqlX::MySql => BackendSqlXMySQL::_name(), - #[cfg(not(any(feature = "asynk-postgres", feature = "asynk-sqlite", feature = "asynk-mysql")))] + #[cfg(not(any( + feature = "asynk-postgres", + feature = "asynk-sqlite", + feature = "asynk-mysql" + )))] BackendSqlX::Dummy => unreachable!(), } } From edf744a7e44bec23fd526bbda19c6c8b664b540f Mon Sep 
17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:30:12 +0200 Subject: [PATCH 76/90] use variant NoBackend instead Option --- fang/src/asynk/async_queue.rs | 124 +++++++++++++++------------------- 1 file changed, 56 insertions(+), 68 deletions(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 49303213..a89bc1f2 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -162,6 +162,7 @@ pub(crate) enum InternalPool { MySql(MySqlPool), #[cfg(feature = "asynk-sqlite")] Sqlite(SqlitePool), + NoBackend, } impl InternalPool { @@ -192,22 +193,23 @@ impl InternalPool { } } - pub(crate) fn backend(&self) -> BackendSqlX { + pub(crate) fn backend(&self) -> Result { match *self { #[cfg(feature = "asynk-postgres")] - InternalPool::Pg(_) => BackendSqlX::Pg, + InternalPool::Pg(_) => Ok(BackendSqlX::Pg), #[cfg(feature = "asynk-mysql")] - InternalPool::MySql(_) => BackendSqlX::MySql, + InternalPool::MySql(_) => Ok(BackendSqlX::MySql), #[cfg(feature = "asynk-sqlite")] - InternalPool::Sqlite(_) => BackendSqlX::Sqlite, + InternalPool::Sqlite(_) => Ok(BackendSqlX::Sqlite), + InternalPool::NoBackend => Err(AsyncQueueError::NotConnectedError), } } } #[derive(TypedBuilder, Debug, Clone)] pub struct AsyncQueue { - #[builder(default=None, setter(skip))] - pool: Option, + #[builder(default=InternalPool::NoBackend, setter(skip))] + pool: InternalPool, #[builder(setter(into))] uri: String, #[builder(setter(into))] @@ -272,8 +274,7 @@ async fn get_pool( Ok(InternalPool::Sqlite(pool)) } - #[allow(unreachable_patterns)] - _ => panic!("Not a valid backend"), + _ => Err(AsyncQueueError::NotConnectedError), } } @@ -295,7 +296,7 @@ impl AsyncQueue { let pool = get_pool(kind, &self.uri, self.max_pool_size).await?; - self.pool = Some(pool); + self.pool = pool; self.connected = true; Ok(()) } @@ -424,12 +425,13 @@ impl AsyncQueue { impl AsyncQueueable for AsyncQueue { async fn find_task_by_id(&mut self, 
id: &Uuid) -> Result { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + + let backend = pool.backend()?; let query_params = QueryParams::builder().uuid(id).build(); - let task = pool - .backend() + let task = backend .execute_query(SqlXQuery::FindTaskById, pool, query_params) .await? .unwrap_task(); @@ -443,9 +445,11 @@ impl AsyncQueueable for AsyncQueue { ) -> Result, AsyncQueueError> { self.check_if_connection()?; // this unwrap is safe because we check if connection is established - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; - let task = Self::fetch_and_touch_task_query(pool, &pool.backend(), task_type).await?; + let backend = pool.backend()?; + + let task = Self::fetch_and_touch_task_query(pool, &backend, task_type).await?; Ok(task) } @@ -453,22 +457,18 @@ impl AsyncQueueable for AsyncQueue { async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; + let metadata = serde_json::to_value(task)?; let task = if !task.uniq() { - Self::insert_task_query( - pool, - &pool.backend(), - &metadata, - &task.task_type(), - &Utc::now(), - ) - .await? + Self::insert_task_query(pool, &backend, &metadata, &task.task_type(), &Utc::now()) + .await? 
} else { Self::insert_task_if_not_exist_query( pool, - &pool.backend(), + &backend, &metadata, &task.task_type(), &Utc::now(), @@ -482,9 +482,10 @@ impl AsyncQueueable for AsyncQueue { async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; - let task = Self::schedule_task_query(pool, &pool.backend(), task).await?; + let task = Self::schedule_task_query(pool, &backend, task).await?; Ok(task) } @@ -492,12 +493,12 @@ impl AsyncQueueable for AsyncQueue { async fn remove_all_tasks(&mut self) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder().build(); - let result = pool - .backend() + let result = backend .execute_query(SqlXQuery::RemoveAllTask, pool, query_params) .await? .unwrap_u64(); @@ -508,12 +509,13 @@ impl AsyncQueueable for AsyncQueue { async fn remove_all_scheduled_tasks(&mut self) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + + let backend = pool.backend()?; let query_params = QueryParams::builder().build(); - let result = pool - .backend() + let result = backend .execute_query(SqlXQuery::RemoveAllScheduledTask, pool, query_params) .await? 
.unwrap_u64(); @@ -523,12 +525,12 @@ impl AsyncQueueable for AsyncQueue { async fn remove_task(&mut self, id: &Uuid) -> Result { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder().uuid(id).build(); - let result = pool - .backend() + let result = backend .execute_query(SqlXQuery::RemoveTask, pool, query_params) .await? .unwrap_u64(); @@ -542,12 +544,12 @@ impl AsyncQueueable for AsyncQueue { ) -> Result { if task.uniq() { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder().runnable(task).build(); - let result = pool - .backend() + let result = backend .execute_query(SqlXQuery::RemoveTaskByMetadata, pool, query_params) .await? .unwrap_u64(); @@ -560,12 +562,12 @@ impl AsyncQueueable for AsyncQueue { async fn remove_tasks_type(&mut self, task_type: &str) -> Result { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder().task_type(task_type).build(); - let result = pool - .backend() + let result = backend .execute_query(SqlXQuery::RemoveTaskType, pool, query_params) .await? .unwrap_u64(); @@ -579,12 +581,12 @@ impl AsyncQueueable for AsyncQueue { state: FangTaskState, ) -> Result { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder().uuid(&task.id).state(state).build(); - let task = pool - .backend() + let task = backend .execute_query(SqlXQuery::UpdateTaskState, pool, query_params) .await? 
.unwrap_task(); @@ -598,15 +600,15 @@ impl AsyncQueueable for AsyncQueue { error_message: &str, ) -> Result { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder() .error_message(error_message) .task(task) .build(); - let failed_task = pool - .backend() + let failed_task = backend .execute_query(SqlXQuery::FailTask, pool, query_params) .await? .unwrap_task(); @@ -622,7 +624,8 @@ impl AsyncQueueable for AsyncQueue { ) -> Result { self.check_if_connection()?; - let pool = self.pool.as_ref().unwrap(); + let pool = &self.pool; + let backend = pool.backend()?; let query_params = QueryParams::builder() .backoff_seconds(backoff_seconds) @@ -630,8 +633,7 @@ impl AsyncQueueable for AsyncQueue { .task(task) .build(); - let failed_task = pool - .backend() + let failed_task = backend .execute_query(SqlXQuery::RetryTask, pool, query_params) .await? .unwrap_task(); @@ -662,14 +664,7 @@ impl AsyncQueue { let create_query: &str = &format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - let mut conn = res - .pool - .as_mut() - .unwrap() - .unwrap_pg_pool() - .acquire() - .await - .unwrap(); + let mut conn = res.pool.unwrap_pg_pool().acquire().await.unwrap(); log::info!("Deleting database {db_name} ..."); conn.execute(delete_query).await.unwrap(); @@ -688,7 +683,7 @@ impl AsyncQueue { log::info!("Database {db_name} created !!"); res.connected = false; - res.pool = None; + res.pool = InternalPool::NoBackend; res.uri = format!("{}/{}", base_url, db_name); res.connect().await.unwrap(); @@ -757,14 +752,7 @@ impl AsyncQueue { let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); - let mut conn = res - .pool - .as_mut() - .unwrap() - .unwrap_mysql_pool() - .acquire() - .await - .unwrap(); + let mut conn = res.pool.unwrap_mysql_pool().acquire().await.unwrap(); 
log::info!("Deleting database {db_name} ..."); conn.execute(delete_query).await.unwrap(); @@ -783,7 +771,7 @@ impl AsyncQueue { log::info!("Database {db_name} created !!"); res.connected = false; - res.pool = None; + res.pool = InternalPool::NoBackend; res.uri = format!("{}/{}", base_url, db_name); res.connect().await.unwrap(); From f0ae9fe70ae76c2d62fa6fb634402e886834160b Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:34:17 +0200 Subject: [PATCH 77/90] fix clippy --- fang/src/asynk/async_queue.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index a89bc1f2..5c76052e 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -274,7 +274,8 @@ async fn get_pool( Ok(InternalPool::Sqlite(pool)) } - _ => Err(AsyncQueueError::NotConnectedError), + #[allow(unreachable_patterns)] + _ => Err(AsyncQueueError::ConnectionError), } } From ce47e61a7e5994cc4e3c5dd051559109975cab7c Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:38:39 +0200 Subject: [PATCH 78/90] delete dummy variant --- fang/src/asynk/backend_sqlx.rs | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index b8be9bbe..8f9079f6 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -38,14 +38,6 @@ pub(crate) enum BackendSqlX { #[cfg(feature = "asynk-mysql")] MySql, - - #[cfg(not(any( - feature = "asynk-postgres", - feature = "asynk-sqlite", - feature = "asynk-mysql" - )))] - #[allow(dead_code)] - Dummy, } #[allow(dead_code)] @@ -113,12 +105,6 @@ impl BackendSqlX { BackendSqlX::MySql => { BackendSqlXMySQL::execute_query(_query, _pool.unwrap_mysql_pool(), _params).await } - #[cfg(not(any( - feature = "asynk-postgres", - feature = "asynk-sqlite", - feature = "asynk-mysql" - )))] - 
BackendSqlX::Dummy => unreachable!(), } } @@ -131,12 +117,6 @@ impl BackendSqlX { BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), #[cfg(feature = "asynk-mysql")] BackendSqlX::MySql => BackendSqlXMySQL::_name(), - #[cfg(not(any( - feature = "asynk-postgres", - feature = "asynk-sqlite", - feature = "asynk-mysql" - )))] - BackendSqlX::Dummy => unreachable!(), } } } From baef61e43362c06c8b68c8445b4545ba79aff28f Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Mon, 15 Apr 2024 19:33:46 +0200 Subject: [PATCH 79/90] changing sqlite and mysql migrations --- fang/Cargo.toml | 4 +- .../up.sql | 6 +- .../up.sql | 13 ++-- fang/src/asynk/backend_sqlx.rs | 34 ++++---- fang/src/asynk/backend_sqlx/mysql.rs | 78 ++++++++++++------- fang/src/asynk/backend_sqlx/sqlite.rs | 18 ++--- fang/src/asynk/queries_postgres/fail_task.sql | 2 +- .../queries_postgres/fetch_task_type.sql | 2 +- .../asynk/queries_postgres/insert_task.sql | 2 +- .../queries_postgres/insert_task_uniq.sql | 2 +- .../remove_all_scheduled_tasks.sql | 2 +- .../src/asynk/queries_postgres/retry_task.sql | 2 +- .../queries_postgres/update_task_state.sql | 2 +- fang/src/asynk/queries_sqlite/insert_task.sql | 2 +- fang/src/blocking/mysql_schema.rs | 9 +-- fang/src/blocking/sqlite_schema.rs | 6 +- fang/src/lib.rs | 69 ---------------- 17 files changed, 100 insertions(+), 153 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 39caf2cb..a38cd19d 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -35,7 +35,7 @@ migrations = ["dep:diesel_migrations"] [dev-dependencies] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} #console-subscriber = "0.2.0" # for tokio tracing 
debug [dependencies] @@ -52,7 +52,7 @@ typed-builder = "0.14" typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , optional = true} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "runtime-tokio-rustls"], optional = true} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "runtime-tokio-rustls"], optional = true} [dependencies.diesel] version = "2.1" diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 4fd52060..5a695a97 100644 --- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -17,9 +17,9 @@ CREATE TABLE fang_tasks ( task_type VARCHAR(255) NOT NULL DEFAULT 'common', -- TEXT type can not have default value, stupid MySQL policy uniq_hash VARCHAR(64), retries INTEGER NOT NULL DEFAULT 0, - scheduled_at VARCHAR(32) NOT NULL DEFAULT(CONCAT(CURRENT_TIMESTAMP, '.000000000+00')), - created_at VARCHAR(32) NOT NULL DEFAULT (CONCAT(CURRENT_TIMESTAMP , '.000000000+00')), - updated_at VARCHAR(32) NOT NULL DEFAULT (CONCAT(CURRENT_TIMESTAMP , '.000000000+00')) + scheduled_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ); CREATE INDEX fang_tasks_state_index ON fang_tasks(state); diff --git a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 2dc9b9e1..4c88cc79 100644 --- a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -14,12 +14,13 @@ CREATE TABLE fang_tasks ( task_type TEXT NOT NULL DEFAULT 'common', uniq_hash CHAR(64), retries 
INTEGER NOT NULL DEFAULT 0, - -- The datetime() function returns the date and time as text in this formats: YYYY-MM-DD HH:MM:SS. - -- https://www.sqlite.org/lang_datefunc.html - scheduled_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00'), - -- why timestamps are texts ? https://www.sqlite.org/datatype3.html - created_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00'), - updated_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00') + -- scheduled_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00'), + + -- Timestamps are stored as the number of seconds since the Unix epoch ('1970-01-01 00:00:00 UTC'). + + scheduled_at INTEGER NOT NULL DEFAULT (unixepoch('now')), + created_at INTEGER NOT NULL DEFAULT (unixepoch('now')), + updated_at INTEGER NOT NULL DEFAULT (unixepoch('now')) ); CREATE INDEX fang_tasks_state_index ON fang_tasks(state); diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 8f9079f6..43e9cfff 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -158,6 +158,7 @@ where for<'r> std::string::String: Encode<'r, DB> + Type, for<'r> &'r str: Encode<'r, DB> + Type, for<'r> i32: Encode<'r, DB> + Type, + for<'r> i64: Encode<'r, DB> + Type, for<'r> &'r Pool: Executor<'r, Database = DB>, for<'r> >::Arguments: IntoArguments<'r, DB>, ::QueryResult: Into, @@ -171,11 +172,11 @@ where // and the caller is the library itself let task_type = params.task_type.unwrap(); - let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); + let now = Utc::now().timestamp(); let task: Task = sqlx::query_as(query) .bind(task_type) - .bind(now_str) + .bind(now) .fetch_one(pool) .await?; @@ -224,10 +225,13 @@ where params: QueryParams<'_>, ) -> Result { let now = Utc::now(); - let now_str = format!("{}", now.format("%F %T%.f+00")); + let now_i64 = now.timestamp(); let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); - let scheduled_at_str = format!("{}", 
scheduled_at.format("%F %T%.f+00")); + + // shadowing in order to not change a lot depending on types + let scheduled_at = scheduled_at.timestamp(); + let now = now_i64; let retries = params.task.unwrap().retries + 1; let mut buffer = Uuid::encode_buffer(); @@ -243,8 +247,8 @@ where let failed_task: Task = sqlx::query_as(query) .bind(error) .bind(retries) - .bind(scheduled_at_str) - .bind(now_str) + .bind(scheduled_at) + .bind(now) .bind(&*uuid_as_text) .fetch_one(pool) .await?; @@ -264,7 +268,7 @@ where let metadata = params.metadata.unwrap(); let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + let scheduled_at = params.scheduled_at.unwrap().timestamp(); let task_type = params.task_type.unwrap(); @@ -275,7 +279,7 @@ where .bind(metadata_str) .bind(task_type) .bind(uniq_hash) - .bind(scheduled_at_str) + .bind(scheduled_at) .fetch_one(pool) .await?; Ok(task) @@ -290,7 +294,7 @@ where let mut buffer = Uuid::encode_buffer(); let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + let scheduled_at_i64 = params.scheduled_at.unwrap().timestamp(); let metadata_str = params.metadata.unwrap().to_string(); let task_type = params.task_type.unwrap(); @@ -299,7 +303,7 @@ where .bind(uuid_as_str) .bind(metadata_str) .bind(task_type) - .bind(scheduled_at_str) + .bind(scheduled_at_i64) .fetch_one(pool) .await?; @@ -311,7 +315,7 @@ where pool: &Pool, params: QueryParams<'_>, ) -> Result { - let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + let updated_at = Utc::now().timestamp(); let state_str: &str = params.state.unwrap().into(); @@ -322,7 +326,7 @@ where let task: Task = sqlx::query_as(query) .bind(state_str) - .bind(updated_at_str) + .bind(updated_at) .bind(&*uuid_as_text) .fetch_one(pool) .await?; @@ -335,7 +339,7 @@ where pool: &Pool, params: QueryParams<'_>, ) -> 
Result { - let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + let updated_at = Utc::now().timestamp(); let id = params.task.unwrap().id; @@ -369,13 +373,13 @@ where query: &str, pool: &Pool, ) -> Result { - let now_str = format!("{}", Utc::now().format("%F %T%.f+00")); + let now = Utc::now().timestamp(); // This converts QueryResult to AnyQueryResult and then to u64 // do not delete into() method and do not delete Into trait bound Ok(sqlx::query(query) - .bind(now_str) + .bind(now) .execute(pool) .await? .into() diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs index f7c6044a..6c92daf3 100644 --- a/fang/src/asynk/backend_sqlx/mysql.rs +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -59,26 +59,11 @@ impl<'a> FromRow<'a, MySqlRow> for Task { let retries: i32 = row.get("retries"); - let scheduled_at_str: &str = row.get("scheduled_at"); + let scheduled_at: DateTime = row.get("scheduled_at"); - // This unwrap is safe because we know that the database returns the date in the correct format - let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") - .unwrap() - .into(); - - let created_at_str: &str = row.get("created_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let created_at: DateTime = row.get("created_at"); - let updated_at_str: &str = row.get("updated_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let updated_at: DateTime = row.get("updated_at"); Ok(Task::builder() .id(id) @@ -105,7 +90,7 @@ impl FangQueryable for BackendSqlXMySQL { let mut buffer = Uuid::encode_buffer(); let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); - let 
scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + let scheduled_at = params.scheduled_at.unwrap(); let metadata_str = params.metadata.unwrap().to_string(); let task_type = params.task_type.unwrap(); @@ -115,7 +100,7 @@ impl FangQueryable for BackendSqlXMySQL { .bind(uuid_as_str) .bind(metadata_str) .bind(task_type) - .bind(scheduled_at_str) + .bind(scheduled_at) .execute(pool) .await?, ) @@ -145,7 +130,7 @@ impl FangQueryable for BackendSqlXMySQL { pool: &Pool, params: QueryParams<'_>, ) -> Result { - let updated_at_str = format!("{}", Utc::now().format("%F %T%.f+00")); + let updated_at = Utc::now(); let state_str: &str = params.state.unwrap().into(); @@ -157,7 +142,7 @@ impl FangQueryable for BackendSqlXMySQL { let affected_rows = Into::::into( sqlx::query(query) .bind(state_str) - .bind(updated_at_str) + .bind(updated_at) .bind(&*uuid_as_text) .execute(pool) .await?, @@ -195,7 +180,8 @@ impl FangQueryable for BackendSqlXMySQL { let metadata = params.metadata.unwrap(); let metadata_str = metadata.to_string(); - let scheduled_at_str = format!("{}", params.scheduled_at.unwrap().format("%F %T%.f+00")); + + let scheduled_at = params.scheduled_at.unwrap(); let task_type = params.task_type.unwrap(); @@ -207,7 +193,7 @@ impl FangQueryable for BackendSqlXMySQL { .bind(metadata_str) .bind(task_type) .bind(uniq_hash) - .bind(scheduled_at_str) + .bind(scheduled_at) .execute(pool) .await?, ) @@ -237,7 +223,7 @@ impl FangQueryable for BackendSqlXMySQL { pool: &Pool, params: QueryParams<'_>, ) -> Result { - let updated_at = format!("{}", Utc::now().format("%F %T%.f+00")); + let updated_at = Utc::now(); let id = params.task.unwrap().id; @@ -282,10 +268,9 @@ impl FangQueryable for BackendSqlXMySQL { params: QueryParams<'_>, ) -> Result { let now = Utc::now(); - let now_str = format!("{}", now.format("%F %T%.f+00")); let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); - let scheduled_at_str = format!("{}", 
scheduled_at.format("%F %T%.f+00")); + let retries = params.task.unwrap().retries + 1; let uuid = params.task.unwrap().id; @@ -299,8 +284,8 @@ impl FangQueryable for BackendSqlXMySQL { sqlx::query(query) .bind(error) .bind(retries) - .bind(scheduled_at_str) - .bind(now_str) + .bind(scheduled_at) + .bind(now) .bind(&*uuid_as_text) .execute(pool) .await?, @@ -365,6 +350,41 @@ impl FangQueryable for BackendSqlXMySQL { Ok(task) } + + async fn fetch_task_type( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + // Unwraps by QueryParams are safe because the responsibility is of the caller + // and the caller is the library itself + let task_type = params.task_type.unwrap(); + + let now = Utc::now(); + + let task: Task = sqlx::query_as(query) + .bind(task_type) + .bind(now) + .fetch_one(pool) + .await?; + + Ok(task) + } + + async fn remove_all_scheduled_tasks( + query: &str, + pool: &Pool, + ) -> Result { + let now = Utc::now(); + + // This converts QueryResult to AnyQueryResult and then to u64 + // do not delete into() method and do not delete Into trait bound + + Ok( + Into::::into(sqlx::query(query).bind(now).execute(pool).await?) 
+ .rows_affected(), + ) + } } impl BackendSqlXMySQL { diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs index 430c9605..a3ede36b 100644 --- a/fang/src/asynk/backend_sqlx/sqlite.rs +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -60,26 +60,20 @@ impl<'a> FromRow<'a, SqliteRow> for Task { let retries: i32 = row.get("retries"); - let scheduled_at_str: &str = row.get("scheduled_at"); + let scheduled_at: i64 = row.get("scheduled_at"); // This unwrap is safe because we know that the database returns the date in the correct format - let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let scheduled_at: DateTime = DateTime::from_timestamp(scheduled_at, 0).unwrap(); - let created_at_str: &str = row.get("created_at"); + let created_at: i64 = row.get("created_at"); // This unwrap is safe because we know that the database returns the date in the correct format - let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let created_at: DateTime = DateTime::from_timestamp(created_at, 0).unwrap(); - let updated_at_str: &str = row.get("updated_at"); + let updated_at: i64 = row.get("updated_at"); // This unwrap is safe because we know that the database returns the date in the correct format - let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let updated_at: DateTime = DateTime::from_timestamp(updated_at, 0).unwrap(); Ok(Task::builder() .id(id) diff --git a/fang/src/asynk/queries_postgres/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql index 01c24020..bb057d75 100644 --- a/fang/src/asynk/queries_postgres/fail_task.sql +++ b/fang/src/asynk/queries_postgres/fail_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3::timestamptz WHERE id = $4::uuid RETURNING id::text , metadata::text , error_message, 
state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = to_timestamp($3) WHERE id = $4::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text \ No newline at end of file diff --git a/fang/src/asynk/queries_postgres/fetch_task_type.sql b/fang/src/asynk/queries_postgres/fetch_task_type.sql index 0bbd61f2..54ae20d3 100644 --- a/fang/src/asynk/queries_postgres/fetch_task_type.sql +++ b/fang/src/asynk/queries_postgres/fetch_task_type.sql @@ -1 +1 @@ -SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2::timestamptz >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND to_timestamp($2) >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql index a9c1fc3d..530ad8bd 100644 --- a/fang/src/asynk/queries_postgres/insert_task.sql +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1::uuid, $2::jsonb, $3, $4::timestamptz ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES 
($1::uuid, $2::jsonb, $3, to_timestamp($4) ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql index 99674daf..21e6ed8c 100644 --- a/fang/src/asynk/queries_postgres/insert_task_uniq.sql +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1::uuid, $2::jsonb , $3, $4, $5::timestamptz ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1::uuid, $2::jsonb , $3, $4, to_timestamp($5) ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql index c102d9c7..9acd38e1 100644 --- a/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql +++ b/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql @@ -1 +1 @@ -DELETE FROM "fang_tasks" WHERE scheduled_at > $1::timestamptz +DELETE FROM "fang_tasks" WHERE scheduled_at > to_timestamp($1) diff --git a/fang/src/asynk/queries_postgres/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql index 8933dc4e..e42eaa38 100644 --- a/fang/src/asynk/queries_postgres/retry_task.sql +++ b/fang/src/asynk/queries_postgres/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3::timestamptz, "updated_at" = $4::timestamptz WHERE id = $5::uuid RETURNING id::text , 
metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = to_timestamp($3), "updated_at" = to_timestamp($4) WHERE id = $5::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_postgres/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql index a3602bfd..8801eee0 100644 --- a/fang/src/asynk/queries_postgres/update_task_state.sql +++ b/fang/src/asynk/queries_postgres/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2::timestamptz WHERE id = $3::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = to_timestamp($2) WHERE id = $3::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text diff --git a/fang/src/asynk/queries_sqlite/insert_task.sql b/fang/src/asynk/queries_sqlite/insert_task.sql index f188b0d8..00a03515 100644 --- a/fang/src/asynk/queries_sqlite/insert_task.sql +++ b/fang/src/asynk/queries_sqlite/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3, $4 ) RETURNING * +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3, $4) RETURNING * diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs index d00b1a4f..c8a9d60e 100644 --- a/fang/src/blocking/mysql_schema.rs +++ b/fang/src/blocking/mysql_schema.rs @@ -24,11 +24,8 @@ diesel::table! 
{ #[max_length = 64] uniq_hash -> Nullable, retries -> Integer, - #[max_length = 32] - scheduled_at -> Varchar, - #[max_length = 32] - created_at -> Varchar, - #[max_length = 32] - updated_at -> Varchar, + scheduled_at -> Datetime, + created_at -> Datetime, + updated_at -> Datetime, } } diff --git a/fang/src/blocking/sqlite_schema.rs b/fang/src/blocking/sqlite_schema.rs index 088b9f24..fe2e2a04 100644 --- a/fang/src/blocking/sqlite_schema.rs +++ b/fang/src/blocking/sqlite_schema.rs @@ -9,8 +9,8 @@ diesel::table! { task_type -> Text, uniq_hash -> Nullable, retries -> Integer, - scheduled_at -> Text, - created_at -> Text, - updated_at -> Text, + scheduled_at -> Integer, + created_at -> Integer, + updated_at -> Integer, } } diff --git a/fang/src/lib.rs b/fang/src/lib.rs index 269518ea..7c1d0096 100644 --- a/fang/src/lib.rs +++ b/fang/src/lib.rs @@ -2,12 +2,6 @@ #[cfg(feature = "blocking")] use diesel::{Identifiable, Queryable}; -#[cfg(feature = "asynk-sqlx")] -use sqlx::any::AnyRow; -#[cfg(feature = "asynk-sqlx")] -use sqlx::FromRow; -#[cfg(feature = "asynk-sqlx")] -use sqlx::Row; use std::time::Duration; use thiserror::Error; use typed_builder::TypedBuilder; @@ -179,69 +173,6 @@ pub struct Task { pub updated_at: DateTime, } -#[cfg(feature = "asynk-sqlx")] -impl<'a> FromRow<'a, AnyRow> for Task { - fn from_row(row: &'a AnyRow) -> Result { - let uuid_as_text: &str = row.get("id"); - - let id = Uuid::parse_str(uuid_as_text).unwrap(); - - let raw: &str = row.get("metadata"); // will work if database cast json to string - let raw = raw.replace('\\', ""); - - // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting - let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); - - // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 - let error_message: Option = row.get("error_message"); - - let state_str: &str = row.get("state"); // will work if database cast json to string - - let state: 
FangTaskState = state_str.into(); - - let task_type: String = row.get("task_type"); - - // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 - let uniq_hash: Option = row.get("uniq_hash"); - - let retries: i32 = row.get("retries"); - - let scheduled_at_str: &str = row.get("scheduled_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") - .unwrap() - .into(); - - let created_at_str: &str = row.get("created_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") - .unwrap() - .into(); - - let updated_at_str: &str = row.get("updated_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") - .unwrap() - .into(); - - Ok(Task::builder() - .id(id) - .metadata(metadata) - .error_message(error_message) - .state(state) - .task_type(task_type) - .uniq_hash(uniq_hash) - .retries(retries) - .scheduled_at(scheduled_at) - .created_at(created_at) - .updated_at(updated_at) - .build()) - } -} - #[doc(hidden)] #[cfg(feature = "blocking")] extern crate diesel; From 527868bbd763e3692f66755cf7e6ecf2ea58a071 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:54:47 +0200 Subject: [PATCH 80/90] sqlx uuid encoding and decoding for sqlite and postgres --- fang/Cargo.toml | 4 +- .../up.sql | 7 +-- fang/src/asynk/backend_sqlx.rs | 52 +++++-------------- fang/src/asynk/backend_sqlx/postgres.rs | 25 ++------- fang/src/asynk/backend_sqlx/sqlite.rs | 19 ++----- fang/src/asynk/queries_postgres/fail_task.sql | 2 +- .../queries_postgres/fetch_task_type.sql | 2 +- .../queries_postgres/find_task_by_id.sql | 2 +- 
.../find_task_by_uniq_hash.sql | 2 +- .../asynk/queries_postgres/insert_task.sql | 2 +- .../queries_postgres/insert_task_uniq.sql | 2 +- .../src/asynk/queries_postgres/retry_task.sql | 2 +- .../queries_postgres/update_task_state.sql | 2 +- fang/src/blocking/sqlite_schema.rs | 2 +- 14 files changed, 35 insertions(+), 90 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index a38cd19d..2fe1f2a3 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -35,7 +35,7 @@ migrations = ["dep:diesel_migrations"] [dev-dependencies] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} #console-subscriber = "0.2.0" # for tokio tracing debug [dependencies] @@ -52,7 +52,7 @@ typed-builder = "0.14" typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , optional = true} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "runtime-tokio-rustls"], optional = true} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "runtime-tokio-rustls"], optional = true} [dependencies.diesel] version = "2.1" diff --git a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 4c88cc79..4c9d6906 100644 --- a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -1,11 +1,8 @@ -- Your SQL goes here - --- docker exec -ti mysql mysql -u root -pfang -P 3360 fang -e "$(catn fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql)" - CREATE TABLE 
fang_tasks ( - id TEXT CHECK (LENGTH(id) = 36) NOT NULL PRIMARY KEY, -- UUID generated inside the language - -- why uuid is a text ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite + -- uuid will be stored as a 16 byte BLOB + id BLOB NOT NULL PRIMARY KEY, -- UUID generated inside the language metadata TEXT NOT NULL, -- why metadata is text ? https://stackoverflow.com/questions/16603621/how-to-store-json-object-in-sqlite-database#16603687 error_message TEXT, diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 43e9cfff..fcbbe3d3 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -159,6 +159,7 @@ where for<'r> &'r str: Encode<'r, DB> + Type, for<'r> i32: Encode<'r, DB> + Type, for<'r> i64: Encode<'r, DB> + Type, + for<'r> &'r Uuid: Encode<'r, DB> + Type, for<'r> &'r Pool: Executor<'r, Database = DB>, for<'r> >::Arguments: IntoArguments<'r, DB>, ::QueryResult: Into, @@ -204,17 +205,9 @@ where pool: &Pool, params: QueryParams<'_>, ) -> Result { - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .uuid - .unwrap() - .as_hyphenated() - .encode_lower(&mut buffer); + let uuid = params.uuid.unwrap(); - let task: Task = sqlx::query_as(query) - .bind(&*uuid_as_text) - .fetch_one(pool) - .await?; + let task: Task = sqlx::query_as(query).bind(uuid).fetch_one(pool).await?; Ok(task) } @@ -234,13 +227,7 @@ where let now = now_i64; let retries = params.task.unwrap().retries + 1; - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .task - .unwrap() - .id - .as_hyphenated() - .encode_lower(&mut buffer); + let uuid = params.uuid.unwrap(); let error = params.error_message.unwrap(); @@ -249,7 +236,7 @@ where .bind(retries) .bind(scheduled_at) .bind(now) - .bind(&*uuid_as_text) + .bind(uuid) .fetch_one(pool) .await?; @@ -262,8 +249,6 @@ where params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: 
&str = uuid.as_hyphenated().encode_lower(&mut buffer); let metadata = params.metadata.unwrap(); @@ -275,7 +260,7 @@ where let uniq_hash = calculate_hash(&metadata_str); let task: Task = sqlx::query_as(query) - .bind(uuid_as_str) + .bind(&uuid) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) @@ -291,8 +276,6 @@ where params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); let scheduled_at_i64 = params.scheduled_at.unwrap().timestamp(); @@ -300,7 +283,7 @@ where let task_type = params.task_type.unwrap(); let task: Task = sqlx::query_as(query) - .bind(uuid_as_str) + .bind(&uuid) .bind(metadata_str) .bind(task_type) .bind(scheduled_at_i64) @@ -321,13 +304,10 @@ where let uuid = params.uuid.unwrap(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - let task: Task = sqlx::query_as(query) .bind(state_str) .bind(updated_at) - .bind(&*uuid_as_text) + .bind(uuid) .fetch_one(pool) .await?; @@ -341,10 +321,7 @@ where ) -> Result { let updated_at = Utc::now().timestamp(); - let id = params.task.unwrap().id; - - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); + let uuid = params.task.unwrap().id; let error_message = params.error_message.unwrap(); @@ -352,7 +329,7 @@ where .bind(<&str>::from(FangTaskState::Failed)) .bind(error_message) .bind(updated_at) - .bind(&*uuid_as_text) + .bind(&uuid) .fetch_one(pool) .await?; @@ -391,15 +368,10 @@ where pool: &Pool, params: QueryParams<'_>, ) -> Result { - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .uuid - .unwrap() - .as_hyphenated() - .encode_lower(&mut buffer); + let uuid = params.uuid.unwrap(); let result = sqlx::query(query) - .bind(&*uuid_as_text) + .bind(uuid) .execute(pool) .await? 
.into() diff --git a/fang/src/asynk/backend_sqlx/postgres.rs b/fang/src/asynk/backend_sqlx/postgres.rs index 5afe304a..615ebbe4 100644 --- a/fang/src/asynk/backend_sqlx/postgres.rs +++ b/fang/src/asynk/backend_sqlx/postgres.rs @@ -42,9 +42,9 @@ use crate::Task; impl<'a> FromRow<'a, PgRow> for Task { fn from_row(row: &'a PgRow) -> Result { - let uuid_as_text: &str = row.get("id"); + let id: Uuid = row.get("id"); - let id = Uuid::parse_str(uuid_as_text).unwrap(); + //let id = Uuid::parse_str(uuid_as_text).unwrap(); let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); @@ -66,26 +66,11 @@ impl<'a> FromRow<'a, PgRow> for Task { let retries: i32 = row.get("retries"); - let scheduled_at_str: &str = row.get("scheduled_at"); + let scheduled_at: DateTime = row.get("scheduled_at"); - // This unwrap is safe because we know that the database returns the date in the correct format - let scheduled_at: DateTime = DateTime::parse_from_str(scheduled_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let created_at: DateTime = row.get("created_at"); - let created_at_str: &str = row.get("created_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let created_at: DateTime = DateTime::parse_from_str(created_at_str, "%F %T%.f%#z") - .unwrap() - .into(); - - let updated_at_str: &str = row.get("updated_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let updated_at: DateTime = DateTime::parse_from_str(updated_at_str, "%F %T%.f%#z") - .unwrap() - .into(); + let updated_at: DateTime = row.get("updated_at"); Ok(Task::builder() .id(id) diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs index a3ede36b..c039633d 100644 --- a/fang/src/asynk/backend_sqlx/sqlite.rs +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -36,9 +36,9 @@ use SqlXQuery as Q; impl<'a> FromRow<'a, SqliteRow> for Task { 
fn from_row(row: &'a SqliteRow) -> Result { - let uuid_as_text: &str = row.get("id"); + let id: Uuid = row.get("id"); - let id = Uuid::parse_str(uuid_as_text).unwrap(); + //let id = Uuid::parse_str(uuid_as_text).unwrap(); let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); @@ -60,20 +60,11 @@ impl<'a> FromRow<'a, SqliteRow> for Task { let retries: i32 = row.get("retries"); - let scheduled_at: i64 = row.get("scheduled_at"); + let scheduled_at: DateTime = row.get("scheduled_at"); - // This unwrap is safe because we know that the database returns the date in the correct format - let scheduled_at: DateTime = DateTime::from_timestamp(scheduled_at, 0).unwrap(); + let created_at: DateTime = row.get("created_at"); - let created_at: i64 = row.get("created_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let created_at: DateTime = DateTime::from_timestamp(created_at, 0).unwrap(); - - let updated_at: i64 = row.get("updated_at"); - - // This unwrap is safe because we know that the database returns the date in the correct format - let updated_at: DateTime = DateTime::from_timestamp(updated_at, 0).unwrap(); + let updated_at: DateTime = row.get("updated_at"); Ok(Task::builder() .id(id) diff --git a/fang/src/asynk/queries_postgres/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql index bb057d75..2cb1f337 100644 --- a/fang/src/asynk/queries_postgres/fail_task.sql +++ b/fang/src/asynk/queries_postgres/fail_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = to_timestamp($3) WHERE id = $4::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text \ No newline at end of file +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = to_timestamp($3) 
WHERE id = $4 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at \ No newline at end of file diff --git a/fang/src/asynk/queries_postgres/fetch_task_type.sql b/fang/src/asynk/queries_postgres/fetch_task_type.sql index 54ae20d3..3d39ca48 100644 --- a/fang/src/asynk/queries_postgres/fetch_task_type.sql +++ b/fang/src/asynk/queries_postgres/fetch_task_type.sql @@ -1 +1 @@ -SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND to_timestamp($2) >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND to_timestamp($2) >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_postgres/find_task_by_id.sql b/fang/src/asynk/queries_postgres/find_task_by_id.sql index d6e9ee80..7b0c419f 100644 --- a/fang/src/asynk/queries_postgres/find_task_by_id.sql +++ b/fang/src/asynk/queries_postgres/find_task_by_id.sql @@ -1 +1 @@ -SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE id = $1::uuid +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1::uuid diff --git a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql index df3d3aa7..3c7bb332 100644 --- a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql +++ 
b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql @@ -1 +1 @@ -SELECT id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql index 530ad8bd..e5becd6d 100644 --- a/fang/src/asynk/queries_postgres/insert_task.sql +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1::uuid, $2::jsonb, $3, to_timestamp($4) ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, to_timestamp($4) ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql index 21e6ed8c..9a8d3958 100644 --- a/fang/src/asynk/queries_postgres/insert_task_uniq.sql +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1::uuid, $2::jsonb , $3, $4, to_timestamp($5) ) RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES 
($1, $2::jsonb , $3, $4, to_timestamp($5) ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql index e42eaa38..5ad6c9bb 100644 --- a/fang/src/asynk/queries_postgres/retry_task.sql +++ b/fang/src/asynk/queries_postgres/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = to_timestamp($3), "updated_at" = to_timestamp($4) WHERE id = $5::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = to_timestamp($3), "updated_at" = to_timestamp($4) WHERE id = $5::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql index 8801eee0..e41a533e 100644 --- a/fang/src/asynk/queries_postgres/update_task_state.sql +++ b/fang/src/asynk/queries_postgres/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = to_timestamp($2) WHERE id = $3::uuid RETURNING id::text , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at::text , created_at::text , updated_at::text +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = to_timestamp($2) WHERE id = $3::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/blocking/sqlite_schema.rs b/fang/src/blocking/sqlite_schema.rs index fe2e2a04..602d1756 100644 --- 
a/fang/src/blocking/sqlite_schema.rs +++ b/fang/src/blocking/sqlite_schema.rs @@ -2,7 +2,7 @@ diesel::table! { fang_tasks (id) { - id -> Text, + id -> Binary, metadata -> Text, error_message -> Nullable, state -> Text, From 0f0be0796ca1fa11d0c5f9c7a92be0c571e300e7 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 16 Apr 2024 10:57:04 +0200 Subject: [PATCH 81/90] delete uuid previous impl comment --- fang/src/asynk/backend_sqlx/postgres.rs | 2 -- fang/src/asynk/backend_sqlx/sqlite.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/fang/src/asynk/backend_sqlx/postgres.rs b/fang/src/asynk/backend_sqlx/postgres.rs index 615ebbe4..65638da3 100644 --- a/fang/src/asynk/backend_sqlx/postgres.rs +++ b/fang/src/asynk/backend_sqlx/postgres.rs @@ -44,8 +44,6 @@ impl<'a> FromRow<'a, PgRow> for Task { fn from_row(row: &'a PgRow) -> Result { let id: Uuid = row.get("id"); - //let id = Uuid::parse_str(uuid_as_text).unwrap(); - let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs index c039633d..65e7d8ee 100644 --- a/fang/src/asynk/backend_sqlx/sqlite.rs +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -38,8 +38,6 @@ impl<'a> FromRow<'a, SqliteRow> for Task { fn from_row(row: &'a SqliteRow) -> Result { let id: Uuid = row.get("id"); - //let id = Uuid::parse_str(uuid_as_text).unwrap(); - let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); From c611d607dc4f9e81b3cf44cef8b755a2cae36a53 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Tue, 16 Apr 2024 11:08:06 +0200 Subject: [PATCH 82/90] fix retry_task impl --- fang/src/asynk/backend_sqlx.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index fcbbe3d3..9c9164fd 
100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -225,9 +225,10 @@ where // shadowing in order to not change a lot depending on types let scheduled_at = scheduled_at.timestamp(); let now = now_i64; - let retries = params.task.unwrap().retries + 1; + let task = params.task.unwrap(); + let retries = task.retries + 1; - let uuid = params.uuid.unwrap(); + let uuid = task.id; let error = params.error_message.unwrap(); @@ -236,7 +237,7 @@ where .bind(retries) .bind(scheduled_at) .bind(now) - .bind(uuid) + .bind(&uuid) .fetch_one(pool) .await?; From 45d896a0d49d17e3fe0b63e52759ba5de07cb175 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 17 Apr 2024 09:40:13 +0200 Subject: [PATCH 83/90] fix mysql timestamp precision --- .../migrations/2023-08-17-102017_create_fang_tasks/up.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 5a695a97..50912b75 100644 --- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -17,9 +17,9 @@ CREATE TABLE fang_tasks ( task_type VARCHAR(255) NOT NULL DEFAULT 'common', -- TEXT type can not have default value, stupid MySQL policy uniq_hash VARCHAR(64), retries INTEGER NOT NULL DEFAULT 0, - scheduled_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + scheduled_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + created_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + updated_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ); CREATE INDEX fang_tasks_state_index ON fang_tasks(state); From ea85604b4a8c4a233d95f873db6d5caf64123539 Mon Sep 17 00:00:00 2001 From: 
pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:39:50 +0200 Subject: [PATCH 84/90] rework encoding Datime for postgresql and sqlite --- fang/src/asynk/backend_sqlx.rs | 20 +++++------ fang/src/asynk/backend_sqlx/mysql.rs | 35 ------------------- fang/src/asynk/queries_postgres/fail_task.sql | 2 +- .../queries_postgres/fetch_task_type.sql | 2 +- .../asynk/queries_postgres/insert_task.sql | 2 +- .../queries_postgres/insert_task_uniq.sql | 2 +- .../remove_all_scheduled_tasks.sql | 2 +- .../src/asynk/queries_postgres/retry_task.sql | 2 +- .../queries_postgres/update_task_state.sql | 2 +- 9 files changed, 15 insertions(+), 54 deletions(-) diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index 9c9164fd..e4e1d51a 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -158,7 +158,7 @@ where for<'r> std::string::String: Encode<'r, DB> + Type, for<'r> &'r str: Encode<'r, DB> + Type, for<'r> i32: Encode<'r, DB> + Type, - for<'r> i64: Encode<'r, DB> + Type, + for<'r> DateTime: Encode<'r, DB> + Type, for<'r> &'r Uuid: Encode<'r, DB> + Type, for<'r> &'r Pool: Executor<'r, Database = DB>, for<'r> >::Arguments: IntoArguments<'r, DB>, @@ -173,7 +173,7 @@ where // and the caller is the library itself let task_type = params.task_type.unwrap(); - let now = Utc::now().timestamp(); + let now = Utc::now(); let task: Task = sqlx::query_as(query) .bind(task_type) @@ -218,13 +218,9 @@ where params: QueryParams<'_>, ) -> Result { let now = Utc::now(); - let now_i64 = now.timestamp(); let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); - // shadowing in order to not change a lot depending on types - let scheduled_at = scheduled_at.timestamp(); - let now = now_i64; let task = params.task.unwrap(); let retries = task.retries + 1; @@ -254,7 +250,7 @@ where let metadata = params.metadata.unwrap(); let metadata_str = metadata.to_string(); - let scheduled_at = 
params.scheduled_at.unwrap().timestamp(); + let scheduled_at = params.scheduled_at.unwrap(); let task_type = params.task_type.unwrap(); @@ -278,7 +274,7 @@ where ) -> Result { let uuid = Uuid::new_v4(); - let scheduled_at_i64 = params.scheduled_at.unwrap().timestamp(); + let scheduled_at = params.scheduled_at.unwrap(); let metadata_str = params.metadata.unwrap().to_string(); let task_type = params.task_type.unwrap(); @@ -287,7 +283,7 @@ where .bind(&uuid) .bind(metadata_str) .bind(task_type) - .bind(scheduled_at_i64) + .bind(scheduled_at) .fetch_one(pool) .await?; @@ -299,7 +295,7 @@ where pool: &Pool, params: QueryParams<'_>, ) -> Result { - let updated_at = Utc::now().timestamp(); + let updated_at = Utc::now(); let state_str: &str = params.state.unwrap().into(); @@ -320,7 +316,7 @@ where pool: &Pool, params: QueryParams<'_>, ) -> Result { - let updated_at = Utc::now().timestamp(); + let updated_at = Utc::now(); let uuid = params.task.unwrap().id; @@ -351,7 +347,7 @@ where query: &str, pool: &Pool, ) -> Result { - let now = Utc::now().timestamp(); + let now = Utc::now(); // This converts QueryResult to AnyQueryResult and then to u64 // do not delete into() method and do not delete Into trait bound diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs index 6c92daf3..32a2d218 100644 --- a/fang/src/asynk/backend_sqlx/mysql.rs +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -350,41 +350,6 @@ impl FangQueryable for BackendSqlXMySQL { Ok(task) } - - async fn fetch_task_type( - query: &str, - pool: &Pool, - params: QueryParams<'_>, - ) -> Result { - // Unwraps by QueryParams are safe because the responsibility is of the caller - // and the caller is the library itself - let task_type = params.task_type.unwrap(); - - let now = Utc::now(); - - let task: Task = sqlx::query_as(query) - .bind(task_type) - .bind(now) - .fetch_one(pool) - .await?; - - Ok(task) - } - - async fn remove_all_scheduled_tasks( - query: &str, - pool: &Pool, - ) -> 
Result { - let now = Utc::now(); - - // This converts QueryResult to AnyQueryResult and then to u64 - // do not delete into() method and do not delete Into trait bound - - Ok( - Into::::into(sqlx::query(query).bind(now).execute(pool).await?) - .rows_affected(), - ) - } } impl BackendSqlXMySQL { diff --git a/fang/src/asynk/queries_postgres/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql index 2cb1f337..fd9c703f 100644 --- a/fang/src/asynk/queries_postgres/fail_task.sql +++ b/fang/src/asynk/queries_postgres/fail_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = to_timestamp($3) WHERE id = $4 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at \ No newline at end of file +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at \ No newline at end of file diff --git a/fang/src/asynk/queries_postgres/fetch_task_type.sql b/fang/src/asynk/queries_postgres/fetch_task_type.sql index 3d39ca48..e9aa6b55 100644 --- a/fang/src/asynk/queries_postgres/fetch_task_type.sql +++ b/fang/src/asynk/queries_postgres/fetch_task_type.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND to_timestamp($2) >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED 
diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql index e5becd6d..30b0c64a 100644 --- a/fang/src/asynk/queries_postgres/insert_task.sql +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, to_timestamp($4) ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, $4 ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql index 9a8d3958..02d079f9 100644 --- a/fang/src/asynk/queries_postgres/insert_task_uniq.sql +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, to_timestamp($5) ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, $5 ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql index 9acd38e1..61a5b6b5 100644 --- a/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql +++ b/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql @@ -1 +1 @@ -DELETE FROM "fang_tasks" WHERE scheduled_at > to_timestamp($1) +DELETE FROM "fang_tasks" WHERE 
scheduled_at > $1 diff --git a/fang/src/asynk/queries_postgres/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql index 5ad6c9bb..0aee87b5 100644 --- a/fang/src/asynk/queries_postgres/retry_task.sql +++ b/fang/src/asynk/queries_postgres/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = to_timestamp($3), "updated_at" = to_timestamp($4) WHERE id = $5::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql index e41a533e..b9bd7200 100644 --- a/fang/src/asynk/queries_postgres/update_task_state.sql +++ b/fang/src/asynk/queries_postgres/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = to_timestamp($2) WHERE id = $3::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2 WHERE id = $3::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at From 94fd332d958e6b51fbdaba5f9530d572f312c895 Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:53:24 +0200 Subject: [PATCH 85/90] rework encoding Uuid for mysql --- .../up.sql | 2 +- fang/src/asynk/backend_sqlx/mysql.rs | 47 +++---------------- fang/src/blocking/mysql_schema.rs | 4 +- 3 files changed, 9 insertions(+), 44 
deletions(-) diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 50912b75..2fce8fb3 100644 --- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -10,7 +10,7 @@ */ CREATE TABLE fang_tasks ( - id VARCHAR(36) DEFAULT (uuid()) PRIMARY KEY, + id BINARY(16) PRIMARY KEY, metadata VARCHAR(2048) NOT NULL, error_message VARCHAR(2048), state ENUM('new', 'in_progress', 'failed', 'finished', 'retried') NOT NULL DEFAULT 'new', diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs index 32a2d218..f787081f 100644 --- a/fang/src/asynk/backend_sqlx/mysql.rs +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -35,9 +35,7 @@ pub(super) struct BackendSqlXMySQL {} impl<'a> FromRow<'a, MySqlRow> for Task { fn from_row(row: &'a MySqlRow) -> Result { - let uuid_as_text: &str = row.get("id"); - - let id = Uuid::parse_str(uuid_as_text).unwrap(); + let id: Uuid = row.get("id"); let raw: &str = row.get("metadata"); // will work if database cast json to string let raw = raw.replace('\\', ""); @@ -87,8 +85,6 @@ impl FangQueryable for BackendSqlXMySQL { params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); let scheduled_at = params.scheduled_at.unwrap(); @@ -97,7 +93,7 @@ impl FangQueryable for BackendSqlXMySQL { let affected_rows = Into::::into( sqlx::query(query) - .bind(uuid_as_str) + .bind(&uuid) .bind(metadata_str) .bind(task_type) .bind(scheduled_at) @@ -136,14 +132,11 @@ impl FangQueryable for BackendSqlXMySQL { let uuid = params.uuid.unwrap(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - let affected_rows = Into::::into( 
sqlx::query(query) .bind(state_str) .bind(updated_at) - .bind(&*uuid_as_text) + .bind(&uuid) .execute(pool) .await?, ) @@ -174,8 +167,6 @@ impl FangQueryable for BackendSqlXMySQL { params: QueryParams<'_>, ) -> Result { let uuid = Uuid::new_v4(); - let mut buffer = Uuid::encode_buffer(); - let uuid_as_str: &str = uuid.as_hyphenated().encode_lower(&mut buffer); let metadata = params.metadata.unwrap(); @@ -189,7 +180,7 @@ impl FangQueryable for BackendSqlXMySQL { let affected_rows = Into::::into( sqlx::query(query) - .bind(uuid_as_str) + .bind(&uuid) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) @@ -227,9 +218,6 @@ impl FangQueryable for BackendSqlXMySQL { let id = params.task.unwrap().id; - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = id.as_hyphenated().encode_lower(&mut buffer); - let error_message = params.error_message.unwrap(); let affected_rows = Into::::into( @@ -237,7 +225,7 @@ impl FangQueryable for BackendSqlXMySQL { .bind(<&str>::from(FangTaskState::Failed)) .bind(error_message) .bind(updated_at) - .bind(&*uuid_as_text) + .bind(&id) .execute(pool) .await?, ) @@ -275,9 +263,6 @@ impl FangQueryable for BackendSqlXMySQL { let uuid = params.task.unwrap().id; - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = uuid.as_hyphenated().encode_lower(&mut buffer); - let error = params.error_message.unwrap(); let affected_rows = Into::::into( @@ -286,7 +271,7 @@ impl FangQueryable for BackendSqlXMySQL { .bind(retries) .bind(scheduled_at) .bind(now) - .bind(&*uuid_as_text) + .bind(&uuid) .execute(pool) .await?, ) @@ -330,26 +315,6 @@ impl FangQueryable for BackendSqlXMySQL { } } } - - async fn find_task_by_id( - query: &str, - pool: &Pool, - params: QueryParams<'_>, - ) -> Result { - let mut buffer = Uuid::encode_buffer(); - let uuid_as_text = params - .uuid - .unwrap() - .as_hyphenated() - .encode_lower(&mut buffer); - - let task: Task = sqlx::query_as(query) - .bind(&*uuid_as_text) - .fetch_one(pool) - .await?; - - Ok(task) - } } 
impl BackendSqlXMySQL { diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs index c8a9d60e..e510d8a0 100644 --- a/fang/src/blocking/mysql_schema.rs +++ b/fang/src/blocking/mysql_schema.rs @@ -11,8 +11,8 @@ diesel::table! { use super::sql_types::FangTasksStateEnum; fang_tasks (id) { - #[max_length = 36] - id -> Varchar, + #[max_length = 16] + id -> Binary, #[max_length = 2048] metadata -> Varchar, #[max_length = 2048] From 9da8548586c46fdb3bde9643955b0d51ae28048c Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 17 Apr 2024 12:54:48 +0200 Subject: [PATCH 86/90] fix clippy --- fang/src/asynk/backend_sqlx/mysql.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs index f787081f..8ccd5224 100644 --- a/fang/src/asynk/backend_sqlx/mysql.rs +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -93,7 +93,7 @@ impl FangQueryable for BackendSqlXMySQL { let affected_rows = Into::::into( sqlx::query(query) - .bind(&uuid) + .bind(uuid) .bind(metadata_str) .bind(task_type) .bind(scheduled_at) @@ -136,7 +136,7 @@ impl FangQueryable for BackendSqlXMySQL { sqlx::query(query) .bind(state_str) .bind(updated_at) - .bind(&uuid) + .bind(uuid) .execute(pool) .await?, ) @@ -180,7 +180,7 @@ impl FangQueryable for BackendSqlXMySQL { let affected_rows = Into::::into( sqlx::query(query) - .bind(&uuid) + .bind(uuid) .bind(metadata_str) .bind(task_type) .bind(uniq_hash) @@ -225,7 +225,7 @@ impl FangQueryable for BackendSqlXMySQL { .bind(<&str>::from(FangTaskState::Failed)) .bind(error_message) .bind(updated_at) - .bind(&id) + .bind(id) .execute(pool) .await?, ) @@ -271,7 +271,7 @@ impl FangQueryable for BackendSqlXMySQL { .bind(retries) .bind(scheduled_at) .bind(now) - .bind(&uuid) + .bind(uuid) .execute(pool) .await?, ) From aafb352b71b946f7f6d1be6292460873f5b12730 Mon Sep 17 00:00:00 2001 From: pxp9 
<48651252+pxp9@users.noreply.github.com> Date: Wed, 17 Apr 2024 20:17:03 +0200 Subject: [PATCH 87/90] fix serde_json::Value decoding and encoding --- fang/Cargo.toml | 4 ++-- .../2023-08-17-102017_create_fang_tasks/up.sql | 4 ++-- fang/src/asynk/backend_sqlx.rs | 7 ++++--- fang/src/asynk/backend_sqlx/mysql.rs | 12 ++++-------- fang/src/asynk/backend_sqlx/postgres.rs | 7 ++----- fang/src/asynk/backend_sqlx/sqlite.rs | 6 +----- fang/src/asynk/queries_postgres/fail_task.sql | 2 +- fang/src/asynk/queries_postgres/fetch_task_type.sql | 2 +- fang/src/asynk/queries_postgres/find_task_by_id.sql | 2 +- .../queries_postgres/find_task_by_uniq_hash.sql | 2 +- fang/src/asynk/queries_postgres/insert_task.sql | 2 +- fang/src/asynk/queries_postgres/insert_task_uniq.sql | 2 +- fang/src/asynk/queries_postgres/retry_task.sql | 2 +- .../src/asynk/queries_postgres/update_task_state.sql | 2 +- fang/src/blocking/mysql_schema.rs | 6 ++---- 15 files changed, 25 insertions(+), 37 deletions(-) diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 2fe1f2a3..3d7c793c 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -35,7 +35,7 @@ migrations = ["dep:diesel_migrations"] [dev-dependencies] fang-derive-error = { version = "0.1.0"} diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} +sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "json","runtime-tokio-rustls", "postgres", "sqlite", "mysql"]} #console-subscriber = "0.2.0" # for tokio tracing debug [dependencies] @@ -52,7 +52,7 @@ typed-builder = "0.14" typetag = "0.2" uuid = { version = "1.1", features = ["v4"] } fang-derive-error = { version = "0.1.0" , optional = true} -sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "runtime-tokio-rustls"], optional = true} +sqlx = {version = "0.6.3", features = ["any" , "macros" , 
"chrono", "uuid", "json", "runtime-tokio-rustls"], optional = true} [dependencies.diesel] version = "2.1" diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql index 2fce8fb3..85efb31e 100644 --- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql +++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql @@ -11,8 +11,8 @@ CREATE TABLE fang_tasks ( id BINARY(16) PRIMARY KEY, - metadata VARCHAR(2048) NOT NULL, - error_message VARCHAR(2048), + metadata JSON NOT NULL, + error_message TEXT, state ENUM('new', 'in_progress', 'failed', 'finished', 'retried') NOT NULL DEFAULT 'new', task_type VARCHAR(255) NOT NULL DEFAULT 'common', -- TEXT type can not have default value, stupid MySQL policy uniq_hash VARCHAR(64), diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs index e4e1d51a..4093ed13 100644 --- a/fang/src/asynk/backend_sqlx.rs +++ b/fang/src/asynk/backend_sqlx.rs @@ -160,6 +160,7 @@ where for<'r> i32: Encode<'r, DB> + Type, for<'r> DateTime: Encode<'r, DB> + Type, for<'r> &'r Uuid: Encode<'r, DB> + Type, + for<'r> &'r serde_json::Value: Encode<'r, DB> + Type, for<'r> &'r Pool: Executor<'r, Database = DB>, for<'r> >::Arguments: IntoArguments<'r, DB>, ::QueryResult: Into, @@ -258,7 +259,7 @@ where let task: Task = sqlx::query_as(query) .bind(&uuid) - .bind(metadata_str) + .bind(metadata) .bind(task_type) .bind(uniq_hash) .bind(scheduled_at) @@ -276,12 +277,12 @@ where let scheduled_at = params.scheduled_at.unwrap(); - let metadata_str = params.metadata.unwrap().to_string(); + let metadata = params.metadata.unwrap(); let task_type = params.task_type.unwrap(); let task: Task = sqlx::query_as(query) .bind(&uuid) - .bind(metadata_str) + .bind(metadata) .bind(task_type) .bind(scheduled_at) .fetch_one(pool) diff --git a/fang/src/asynk/backend_sqlx/mysql.rs 
b/fang/src/asynk/backend_sqlx/mysql.rs index 8ccd5224..c0d51409 100644 --- a/fang/src/asynk/backend_sqlx/mysql.rs +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -37,11 +37,7 @@ impl<'a> FromRow<'a, MySqlRow> for Task { fn from_row(row: &'a MySqlRow) -> Result { let id: Uuid = row.get("id"); - let raw: &str = row.get("metadata"); // will work if database cast json to string - let raw = raw.replace('\\', ""); - - // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting - let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); + let metadata: serde_json::Value = row.get("metadata"); // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 let error_message: Option = row.get("error_message"); @@ -88,13 +84,13 @@ impl FangQueryable for BackendSqlXMySQL { let scheduled_at = params.scheduled_at.unwrap(); - let metadata_str = params.metadata.unwrap().to_string(); + let metadata = params.metadata.unwrap(); let task_type = params.task_type.unwrap(); let affected_rows = Into::::into( sqlx::query(query) .bind(uuid) - .bind(metadata_str) + .bind(metadata) .bind(task_type) .bind(scheduled_at) .execute(pool) @@ -181,7 +177,7 @@ impl FangQueryable for BackendSqlXMySQL { let affected_rows = Into::::into( sqlx::query(query) .bind(uuid) - .bind(metadata_str) + .bind(metadata) .bind(task_type) .bind(uniq_hash) .bind(scheduled_at) diff --git a/fang/src/asynk/backend_sqlx/postgres.rs b/fang/src/asynk/backend_sqlx/postgres.rs index 65638da3..74d56a13 100644 --- a/fang/src/asynk/backend_sqlx/postgres.rs +++ b/fang/src/asynk/backend_sqlx/postgres.rs @@ -44,11 +44,8 @@ impl<'a> FromRow<'a, PgRow> for Task { fn from_row(row: &'a PgRow) -> Result { let id: Uuid = row.get("id"); - let raw: &str = row.get("metadata"); // will work if database cast json to string - let raw = raw.replace('\\', ""); - - // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting - let metadata: serde_json::Value = 
serde_json::from_str(&raw).unwrap(); + // -- SELECT metadata->>'type' FROM fang_tasks ; + let metadata: serde_json::Value = row.get("metadata"); // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 let error_message: Option = row.get("error_message"); diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs index 65e7d8ee..1d000e22 100644 --- a/fang/src/asynk/backend_sqlx/sqlite.rs +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -38,11 +38,7 @@ impl<'a> FromRow<'a, SqliteRow> for Task { fn from_row(row: &'a SqliteRow) -> Result { let id: Uuid = row.get("id"); - let raw: &str = row.get("metadata"); // will work if database cast json to string - let raw = raw.replace('\\', ""); - - // -- SELECT metadata->>'type' FROM fang_tasks ; this works because jsonb casting - let metadata: serde_json::Value = serde_json::from_str(&raw).unwrap(); + let metadata: serde_json::Value = row.get("metadata"); // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 let error_message: Option = row.get("error_message"); diff --git a/fang/src/asynk/queries_postgres/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql index fd9c703f..f8a9bdc7 100644 --- a/fang/src/asynk/queries_postgres/fail_task.sql +++ b/fang/src/asynk/queries_postgres/fail_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at \ No newline at end of file +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at \ No newline at end of file diff --git a/fang/src/asynk/queries_postgres/fetch_task_type.sql 
b/fang/src/asynk/queries_postgres/fetch_task_type.sql index e9aa6b55..14f4af22 100644 --- a/fang/src/asynk/queries_postgres/fetch_task_type.sql +++ b/fang/src/asynk/queries_postgres/fetch_task_type.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED +SELECT id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_postgres/find_task_by_id.sql b/fang/src/asynk/queries_postgres/find_task_by_id.sql index 7b0c419f..88c99be4 100644 --- a/fang/src/asynk/queries_postgres/find_task_by_id.sql +++ b/fang/src/asynk/queries_postgres/find_task_by_id.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1::uuid +SELECT id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1::uuid diff --git a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql index 3c7bb332..3c937a54 100644 --- a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql +++ b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql @@ -1 +1 @@ -SELECT id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 +SELECT id , metadata , error_message, state::text , task_type , uniq_hash, 
retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql index 30b0c64a..f719d04c 100644 --- a/fang/src/asynk/queries_postgres/insert_task.sql +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, $4 ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, $4 ) RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql index 02d079f9..15a78ab2 100644 --- a/fang/src/asynk/queries_postgres/insert_task_uniq.sql +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -1 +1 @@ -INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, $5 ) RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, $5 ) RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql index 0aee87b5..e559422b 100644 --- a/fang/src/asynk/queries_postgres/retry_task.sql +++ b/fang/src/asynk/queries_postgres/retry_task.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = 
$2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5::uuid RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql index b9bd7200..8620e755 100644 --- a/fang/src/asynk/queries_postgres/update_task_state.sql +++ b/fang/src/asynk/queries_postgres/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2 WHERE id = $3::uuid RETURNING id , metadata::text , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2 WHERE id = $3::uuid RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs index e510d8a0..94273909 100644 --- a/fang/src/blocking/mysql_schema.rs +++ b/fang/src/blocking/mysql_schema.rs @@ -13,10 +13,8 @@ diesel::table! 
{ fang_tasks (id) { #[max_length = 16] id -> Binary, - #[max_length = 2048] - metadata -> Varchar, - #[max_length = 2048] - error_message -> Nullable, + metadata -> Json, + error_message -> Nullable, #[max_length = 11] state -> FangTasksStateEnum, #[max_length = 255] From e7c15bb474e00e882bdff25d8bd9200549c1dffd Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Wed, 17 Apr 2024 20:22:46 +0200 Subject: [PATCH 88/90] README stuff --- fang/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/fang/README.md b/fang/README.md index c40fdc1f..4314f184 100644 --- a/fang/README.md +++ b/fang/README.md @@ -4,7 +4,7 @@ # Fang -Background task processing library for Rust. It can use PostgreSQL, SQLite or MySQL as a task queue. +Background task processing library for Rust. It can use PostgreSQL, SQLite or MySQL as an asynchronous task queue. ## Key Features @@ -31,7 +31,7 @@ Here are some of the fang's key features: ```toml [dependencies] -fang = { version = "0.11" , features = ["blocking"], default-features = false } +fang = { version = "0.11.0-rc0" , features = ["blocking"], default-features = false } ``` #### the Asynk feature @@ -40,21 +40,21 @@ ```toml [dependencies] -fang = { version = "0.11" , features = ["asynk-postgres"], default-features = false } +fang = { version = "0.11.0-rc0" , features = ["asynk-postgres"], default-features = false } ``` - SQLite as a queue ```toml [dependencies] -fang = { version = "0.11" , features = ["asynk-sqlite"], default-features = false } +fang = { version = "0.11.0-rc0" , features = ["asynk-sqlite"], default-features = false } ``` - MySQL as a queue ```toml [dependencies] -fang = { version = "0.11" , features = ["asynk-mysql"], default-features = false } +fang = { version = "0.11.0-rc0" , features = ["asynk-mysql"], default-features = false } ``` #### the Asynk feature with derive macro 
@@ -63,16 +63,16 @@ Substitute `database` with your desired backend. ```toml [dependencies] -fang = { version = "0.11" , features = ["asynk-{database}", "derive-error" ], default-features = false } +fang = { version = "0.11.0-rc0" , features = ["asynk-{database}", "derive-error" ], default-features = false } ``` #### All features ```toml -fang = { version = "0.11" } +fang = { version = "0.11.0-rc0" } ``` -_Supports rustc 1.62+_ +_Supports rustc 1.77+_ 1. Create the `fang_tasks` table in the database. The migration of each database can be found in `fang/{database}-migrations` where `database` is `postgres`, `mysql` or `sqlite`. @@ -80,7 +80,7 @@ Migrations can be also run as code, importing the feature `migrations-{database} ```toml [dependencies] -fang = { version = "0.11" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } +fang = { version = "0.11.0-rc0" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false } ``` ```rust From af8e0b4f1f50789184b82384f758acbc745683bd Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Thu, 18 Apr 2024 11:46:40 +0200 Subject: [PATCH 89/90] improving API --- .../asynk/simple_async_worker/src/lib.rs | 4 +- .../asynk/simple_cron_async_worker/src/lib.rs | 2 +- fang/src/asynk/async_queue.rs | 58 ++++++++----------- .../asynk/async_queue/async_queue_tests.rs | 24 ++++---- fang/src/asynk/async_runnable.rs | 2 +- fang/src/asynk/async_worker.rs | 16 ++--- 6 files changed, 49 insertions(+), 57 deletions(-) diff --git a/fang/fang_examples/asynk/simple_async_worker/src/lib.rs b/fang/fang_examples/asynk/simple_async_worker/src/lib.rs index cfd269b0..2a203920 100644 --- a/fang/fang_examples/asynk/simple_async_worker/src/lib.rs +++ b/fang/fang_examples/asynk/simple_async_worker/src/lib.rs @@ -33,7 +33,7 @@ impl MyFailingTask { #[async_trait] #[typetag::serde] impl AsyncRunnable for MyTask { - async fn run(&self, queue: &mut dyn AsyncQueueable) -> Result<(), 
FangError> { + async fn run(&self, queue: &dyn AsyncQueueable) -> Result<(), FangError> { let new_task = MyTask::new(self.number + 1); queue .insert_task(&new_task as &dyn AsyncRunnable) @@ -50,7 +50,7 @@ impl AsyncRunnable for MyTask { #[async_trait] #[typetag::serde] impl AsyncRunnable for MyFailingTask { - async fn run(&self, queue: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, queue: &dyn AsyncQueueable) -> Result<(), FangError> { let new_task = MyFailingTask::new(self.number + 1); queue .insert_task(&new_task as &dyn AsyncRunnable) diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs b/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs index 2bb972b4..2efc55ad 100644 --- a/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs +++ b/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs @@ -13,7 +13,7 @@ pub struct MyCronTask {} #[async_trait] #[typetag::serde] impl AsyncRunnable for MyCronTask { - async fn run(&self, _queue: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queue: &dyn AsyncQueueable) -> Result<(), FangError> { log::info!("CRON!!!!!!!!!!!!!!!",); Ok(()) diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 5c76052e..c44abb0a 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -79,61 +79,57 @@ impl From for AsyncQueueError { /// This is implemented by the `AsyncQueue` struct which uses internally a `AnyPool` of `sqlx` to connect to the database. #[async_trait] -pub trait AsyncQueueable: Send { +pub trait AsyncQueueable: Send + Sync { /// This method should retrieve one task of the `task_type` type. If `task_type` is `None` it will try to /// fetch a task of the type `common`. After fetching it should update the state of the task to /// `FangTaskState::InProgress`. 
/// async fn fetch_and_touch_task( - &mut self, + &self, task_type: Option, ) -> Result, AsyncQueueError>; /// Enqueue a task to the queue, The task will be executed as soon as possible by the worker of the same type /// created by an AsyncWorkerPool. - async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result; + async fn insert_task(&self, task: &dyn AsyncRunnable) -> Result; /// The method will remove all tasks from the queue - async fn remove_all_tasks(&mut self) -> Result; + async fn remove_all_tasks(&self) -> Result; /// Remove all tasks that are scheduled in the future. - async fn remove_all_scheduled_tasks(&mut self) -> Result; + async fn remove_all_scheduled_tasks(&self) -> Result; /// Remove a task by its id. - async fn remove_task(&mut self, id: &Uuid) -> Result; + async fn remove_task(&self, id: &Uuid) -> Result; /// Remove a task by its metadata (struct fields values) async fn remove_task_by_metadata( - &mut self, + &self, task: &dyn AsyncRunnable, ) -> Result; /// Removes all tasks that have the specified `task_type`. - async fn remove_tasks_type(&mut self, task_type: &str) -> Result; + async fn remove_tasks_type(&self, task_type: &str) -> Result; /// Retrieve a task from storage by its `id`. - async fn find_task_by_id(&mut self, id: &Uuid) -> Result; + async fn find_task_by_id(&self, id: &Uuid) -> Result; /// Update the state field of the specified task /// See the `FangTaskState` enum for possible states. async fn update_task_state( - &mut self, + &self, task: &Task, state: FangTaskState, ) -> Result; /// Update the state of a task to `FangTaskState::Failed` and set an error_message. - async fn fail_task( - &mut self, - task: &Task, - error_message: &str, - ) -> Result; + async fn fail_task(&self, task: &Task, error_message: &str) -> Result; /// Schedule a task. 
- async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result; + async fn schedule_task(&self, task: &dyn AsyncRunnable) -> Result; async fn schedule_retry( - &mut self, + &self, task: &Task, backoff_seconds: u32, error: &str, @@ -424,7 +420,7 @@ impl AsyncQueue { #[async_trait] impl AsyncQueueable for AsyncQueue { - async fn find_task_by_id(&mut self, id: &Uuid) -> Result { + async fn find_task_by_id(&self, id: &Uuid) -> Result { self.check_if_connection()?; let pool = &self.pool; @@ -441,7 +437,7 @@ impl AsyncQueueable for AsyncQueue { } async fn fetch_and_touch_task( - &mut self, + &self, task_type: Option, ) -> Result, AsyncQueueError> { self.check_if_connection()?; @@ -455,7 +451,7 @@ impl AsyncQueueable for AsyncQueue { Ok(task) } - async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result { + async fn insert_task(&self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established let pool = &self.pool; @@ -480,7 +476,7 @@ impl AsyncQueueable for AsyncQueue { Ok(task) } - async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result { + async fn schedule_task(&self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established let pool = &self.pool; @@ -491,7 +487,7 @@ impl AsyncQueueable for AsyncQueue { Ok(task) } - async fn remove_all_tasks(&mut self) -> Result { + async fn remove_all_tasks(&self) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established let pool = &self.pool; @@ -507,7 +503,7 @@ impl AsyncQueueable for AsyncQueue { Ok(result) } - async fn remove_all_scheduled_tasks(&mut self) -> Result { + async fn remove_all_scheduled_tasks(&self) -> Result { self.check_if_connection()?; // this unwrap is safe because we check if connection is established let pool = &self.pool; @@ -524,7 +520,7 @@ impl AsyncQueueable for 
AsyncQueue { Ok(result) } - async fn remove_task(&mut self, id: &Uuid) -> Result { + async fn remove_task(&self, id: &Uuid) -> Result { self.check_if_connection()?; let pool = &self.pool; let backend = pool.backend()?; @@ -540,7 +536,7 @@ impl AsyncQueueable for AsyncQueue { } async fn remove_task_by_metadata( - &mut self, + &self, task: &dyn AsyncRunnable, ) -> Result { if task.uniq() { @@ -561,7 +557,7 @@ impl AsyncQueueable for AsyncQueue { } } - async fn remove_tasks_type(&mut self, task_type: &str) -> Result { + async fn remove_tasks_type(&self, task_type: &str) -> Result { self.check_if_connection()?; let pool = &self.pool; let backend = pool.backend()?; @@ -577,7 +573,7 @@ impl AsyncQueueable for AsyncQueue { } async fn update_task_state( - &mut self, + &self, task: &Task, state: FangTaskState, ) -> Result { @@ -595,11 +591,7 @@ impl AsyncQueueable for AsyncQueue { Ok(task) } - async fn fail_task( - &mut self, - task: &Task, - error_message: &str, - ) -> Result { + async fn fail_task(&self, task: &Task, error_message: &str) -> Result { self.check_if_connection()?; let pool = &self.pool; let backend = pool.backend()?; @@ -618,7 +610,7 @@ impl AsyncQueueable for AsyncQueue { } async fn schedule_retry( - &mut self, + &self, task: &Task, backoff_seconds: u32, error: &str, diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index 62e836b3..3f60a738 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -15,7 +15,7 @@ pub(crate) struct AsyncTask { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } } @@ -28,7 +28,7 @@ pub(crate) struct AsyncUniqTask { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncUniqTask { - async fn run(&self, _queueable: 
&mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -46,7 +46,7 @@ pub(crate) struct AsyncTaskSchedule { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTaskSchedule { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -77,7 +77,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn insert_task_creates_new_task() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -91,7 +91,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn update_task_state_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -114,7 +114,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn failed_task_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -135,7 +135,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_all_tasks_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -161,7 +161,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn schedule_task_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let datetime = (Utc::now() + Duration::seconds(7)).round_subsecs(0); @@ -183,7 +183,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_all_scheduled_tasks_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let datetime = (Utc::now() + Duration::seconds(7)).round_subsecs(0); @@ -207,7 +207,7 @@ macro_rules! 
test_asynk_queue { #[tokio::test] async fn fetch_and_touch_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -247,7 +247,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_tasks_type_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -279,7 +279,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_tasks_by_metadata() { //console_subscriber::init(); - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test .insert_task(&AsyncUniqTask { number: 1 }) diff --git a/fang/src/asynk/async_runnable.rs b/fang/src/asynk/async_runnable.rs index 3a73148d..bc0852b3 100644 --- a/fang/src/asynk/async_runnable.rs +++ b/fang/src/asynk/async_runnable.rs @@ -34,7 +34,7 @@ impl From for FangError { #[async_trait] pub trait AsyncRunnable: Send + Sync { /// Execute the task. This method should define its logic - async fn run(&self, client: &mut dyn AsyncQueueable) -> Result<(), FangError>; + async fn run(&self, client: &dyn AsyncQueueable) -> Result<(), FangError>; /// Define the type of the task. 
/// The `common` task type is used by default diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index 7c73227d..a0d3d420 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -30,7 +30,7 @@ where AQueue: AsyncQueueable + Clone + Sync + 'static, { async fn run(&mut self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { - let result = runnable.run(&mut self.queue).await; + let result = runnable.run(&self.queue).await; match result { Ok(_) => self.finalize_task(task, &result).await?, @@ -272,7 +272,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for WorkerAsyncTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } } @@ -285,7 +285,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for WorkerAsyncTaskSchedule { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } fn cron(&self) -> Option { @@ -299,7 +299,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for WorkerAsyncTaskScheduled { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { log::info!("WorkerAsyncTaskScheduled has been run"); tokio::time::sleep(std::time::Duration::from_millis(2050)).await; Ok(()) @@ -322,7 +322,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncFailedTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { let message = format!("number {} is wrong :(", self.number); Err(FangError { @@ -341,7 +341,7 @@ 
mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncRetryTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { let message = "Failed".to_string(); Err(FangError { @@ -360,7 +360,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTaskType1 { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -375,7 +375,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTaskType2 { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } From adff5b2edf2498893852dcb987cd1b43bc06633e Mon Sep 17 00:00:00 2001 From: pxp9 <48651252+pxp9@users.noreply.github.com> Date: Thu, 18 Apr 2024 11:52:54 +0200 Subject: [PATCH 90/90] improving worker API --- fang/src/asynk/async_worker.rs | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index a0d3d420..e9f1360f 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -29,7 +29,7 @@ impl AsyncWorker where AQueue: AsyncQueueable + Clone + Sync + 'static, { - async fn run(&mut self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { + async fn run(&self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { let result = runnable.run(&self.queue).await; match result { @@ -52,7 +52,7 @@ where } async fn finalize_task( - &mut self, + &self, task: &Task, result: &Result<(), FangError>, ) -> Result<(), FangError> { @@ -143,11 +143,7 @@ pub struct AsyncWorkerTest<'a> { #[cfg(test)] impl<'a> AsyncWorkerTest<'a> { - pub async 
fn run( - &mut self, - task: &Task, - runnable: &dyn AsyncRunnable, - ) -> Result<(), FangError> { + pub async fn run(&self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { let result = runnable.run(self.queue).await; match result { @@ -170,7 +166,7 @@ impl<'a> AsyncWorkerTest<'a> { } async fn finalize_task( - &mut self, + &self, task: &Task, result: &Result<(), FangError>, ) -> Result<(), FangError> { @@ -393,7 +389,7 @@ mod async_worker_tests { let task = insert_task(&mut test, &actual_task).await; let id = task.id; - let mut worker = AsyncWorkerTest::builder() + let worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) .retention_mode(RetentionMode::KeepAll) .build(); @@ -484,7 +480,7 @@ mod async_worker_tests { let task = insert_task(&mut test, &failed_task).await; let id = task.id; - let mut worker = AsyncWorkerTest::builder() + let worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) .retention_mode(RetentionMode::KeepAll) .build();