diff --git a/.env b/.env index 43ea1787..91c5e820 100644 --- a/.env +++ b/.env @@ -20,8 +20,11 @@ SQLITE_FILE=fang.db SQLITE_DIESEL_DIR=fang/sqlite_migrations SQLITE_MIGRATIONS=${SQLITE_DIESEL_DIR}/migrations SQLITE_CONFIG=${SQLITE_DIESEL_DIR}/diesel.toml +SQLITE_TESTS_DIR=tests_sqlite HOST=127.0.0.1 -POSTGRES_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST}/${POSTGRES_DB} -MYSQL_URL=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@${HOST}/${MYSQL_DB} +POSTGRES_BASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST} +POSTGRES_URL=${POSTGRES_BASE_URL}/${POSTGRES_DB} +MYSQL_BASE_URL=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@${HOST} +MYSQL_URL=${MYSQL_BASE_URL}/${MYSQL_DB} DATABASE_URL=${POSTGRES_URL} diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a36adf33..5aca75a5 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -3,17 +3,40 @@ name: Test and Build Rust on: push: pull_request: + types: [opened, reopened] schedule: # Check if it works with current dependencies (weekly on Wednesday 2:32 UTC) - cron: '32 2 * * 3' -env : - DATABASE_URL : postgres://postgres:postgres@localhost/fang jobs: - test: - name: Test + clippy: + name: Clippy runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + - stable + + steps: + - uses: actions/checkout@v4 + + + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + + - name: Run clippy + run: cargo clippy --verbose --all-targets --all-features -- -D warnings + + test_postgres_blocking: + name: Test blocking + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always services: # Label used to access the service container @@ -35,67 +58,196 @@ jobs: --health-timeout 5s --health-retries 5 + strategy: + matrix: + toolchain: + - stable + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - components: clippy - override: true - profile: minimal - toolchain: stable - - - name: Run clippy - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --verbose --all-targets --all-features -- -D warnings + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Install diesel-cli - uses: actions-rs/cargo@v1 - with: - command: install - args: diesel_cli --no-default-features --features "postgres" + run: cargo install diesel_cli --no-default-features --features postgres - name: Setup Postgres db working-directory: ./fang/postgres_migrations - run: diesel setup + run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang" + + - name: Run blocking tests + run: cargo test "blocking::queue::postgres" --verbose --features blocking --color always -- --nocapture + + - name: Run blocking dirty tests + run: cargo test "blocking::worker" --verbose --features blocking -- --ignored + + test_fang_derive_error: + name: Test fang_derive_error + runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + - stable + + steps: + - uses: actions/checkout@v4 + + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + - name: Run fang derive error tests + run: cargo test "fang_derive_error" --verbose --color always -- --nocapture + + test_postgres: + name: Test postgres + runs-on: ubuntu-latest + env: + DATABASE_URL: postgres://postgres:postgres@localhost/fang + CARGO_TERM_COLOR: always + + strategy: + matrix: + toolchain: + 
- stable

+    services:
+      # Label used to access the service container
+      postgres:
+        # Docker Hub image
+        image: postgres
+        # Provide the password for postgres
+        env:
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_USER: postgres
+        # Set health checks to wait until postgres has started
+
+        ports:
+          - 5432:5432
+
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5

-      - name: Change working dir
-        working-directory: ./../..
-        run: pwd
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Rust
+        run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }}
+
+      - name: Install diesel-cli
+        run: cargo install diesel_cli --no-default-features --features postgres
+
+      - name: Setup Postgres db
+        working-directory: ./fang/postgres_migrations
+        run: diesel setup --database-url "postgres://postgres:postgres@localhost/fang"

       - name: Run tests
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --verbose --all-features
+        run: cargo test "asynk::async_queue::postgres" --verbose --features asynk-postgres --color always -- --nocapture
+
+      - name: Run worker tests
+        run: cargo test "asynk::async_worker::async_worker_tests" --verbose --features asynk-postgres --color always -- --nocapture
+
+  test_sqlite:
+    name: Test sqlite
+    runs-on: ubuntu-latest
+    env:
+      CARGO_TERM_COLOR: always
+
+    strategy:
+      matrix:
+        toolchain:
+          - stable

-      - name: Run dirty tests
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --verbose --all-features -- --ignored
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Rust
+        run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }}
+
+      - name: Install sqlite3
+        run: |
+          sudo apt install -y sqlite3
+          sqlite3 fang.db "VACUUM;"
+          mkdir tests_sqlite
+
+      - name: Install diesel-cli
+        run: cargo install diesel_cli --no-default-features --features sqlite
+
+      - name: Setup Sqlite db
+        working-directory: ./fang/sqlite_migrations
+        run: diesel setup --database-url "sqlite3://../../../fang.db"
+
+      - name: Run tests
+        run: cargo test "asynk::async_queue::sqlite" --verbose --features asynk-sqlite -- --nocapture

   release:
     name: Release x86_64-unknown-linux-gnu
     runs-on: ubuntu-latest
-    needs: test
+    env:
+      CARGO_TERM_COLOR: always

-    steps:
-      - uses: actions/checkout@v3
+    strategy:
+      matrix:
+        toolchain:
+          - stable

+    steps:
+      - uses: actions/checkout@v4

       - name: Setup Rust
-        uses: actions-rs/toolchain@v1
-        with:
-          override: true
-          profile: minimal
-          target: x86_64-unknown-linux-gnu
-          toolchain: stable
+        run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }}

       - name: Build release
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --release --verbose --all-features --target x86_64-unknown-linux-gnu
+        run: cargo build --release --verbose --all-features --target x86_64-unknown-linux-gnu
+
+  test_mysql:
+    name: Test mysql
+    runs-on: ubuntu-latest
+    env:
+      DATABASE_URL: mysql://root:mysql@localhost/fang
+      CARGO_TERM_COLOR: always
+
+    strategy:
+      matrix:
+        toolchain:
+          - stable
+
+    services:
+      # Label used to access the service container
+      mysql:
+        # Docker Hub image
+        image: mysql:8.1
+        # Provide the password for mysql
+        env:
+          MYSQL_ROOT_PASSWORD: mysql
+          MYSQL_DATABASE: fang
+        # Health check intended to wait until mysql is ready (it does not work reliably)
+        options: >-
+          --health-cmd "mysqladmin ping -h localhost -u root -pmysql"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+        ports:
+          - 3306:3306
+
+    steps:
+      - uses: actions/checkout@v4
+ + - name: Setup Rust + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} + + - name: Install diesel-cli + run: cargo install diesel_cli --no-default-features --features mysql + + - name: Setup MySQL db + working-directory: ./fang/mysql_migrations + run: diesel setup --database-url "mysql://root:mysql@127.0.0.1/fang" + + - name: Run tests + run: cargo test "asynk::async_queue::mysql" --verbose --features asynk-mysql --color always -- --nocapture diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 1332e2b7..82f65ec8 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -3,24 +3,23 @@ name: Rust Code Formatting on: push: pull_request: + types: [opened, reopened] jobs: rustfmt: name: Rustfmt runs-on: ubuntu-latest + env: + CARGO_TERM_COLOR: always + strategy: + matrix: + toolchain: + - stable steps: - uses: actions/checkout@v3 - name: Setup Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - profile: minimal - components: rustfmt + run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} - name: Check format - uses: actions-rs/cargo@v1 - with: - command: fmt - args: -- --check --verbose + run: cargo fmt -- --check --verbose diff --git a/.gitignore b/.gitignore index 61005740..b07ec398 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ Cargo.lock docs/content/docs/CHANGELOG.md docs/content/docs/README.md fang.db +tests_sqlite/ diff --git a/Makefile b/Makefile index fc340091..e1848509 100644 --- a/Makefile +++ b/Makefile @@ -47,6 +47,7 @@ db_mysql: db_sqlite: @echo -e $(BOLD)Setting up SQLite database...$(END_BOLD) sqlite3 "$(SQLITE_FILE)" "VACUUM;" + mkdir -p "$(SQLITE_TESTS_DIR)" $(MAKE) diesel_sqlite diesel: $(DIESEL_TARGETS) diff --git a/fang-derive-error/example/src/lib.rs b/fang-derive-error/example/src/lib.rs index 86262e6f..b4d6c781 100644 --- a/fang-derive-error/example/src/lib.rs +++ b/fang-derive-error/example/src/lib.rs @@ -7,7 +7,7 @@ pub enum MyAwesomeError { MyVariantErrorTwo(u32), } #[cfg(test)] -mod tests { +mod fang_derive_error_tests { use crate::MyAwesomeError; use fang::FangError; diff --git a/fang/Cargo.toml b/fang/Cargo.toml index 876adcdd..3d7c793c 100644 --- a/fang/Cargo.toml +++ b/fang/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "fang" -version = "0.10.4" +version = "0.11.0-rc0" authors = ["Ayrat Badykov " , "Pepe Márquez "] description = "Background job processing library for Rust" repository = "https://github.com/ayrat555/fang" edition = "2021" license = "MIT" readme = "README.md" -rust-version = "1.62" +rust-version = "1.77" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -15,22 +15,28 @@ rust-version = "1.62" doctest = false [features] -default = ["blocking", "asynk", "derive-error", "postgres", "mysql" , "sqlite", "migrations_postgres", "migrations_sqlite" , "migrations_mysql"] -blocking = ["dep:diesel", "dep:diesel-derive-enum", "dep:dotenvy", "diesel?/chrono" , "diesel?/serde_json" , "diesel?/uuid"] -asynk = ["dep:bb8-postgres", "dep:postgres-types", "dep:tokio", "dep:async-trait", "dep:async-recursion"] +default = ["blocking", "asynk-sqlx", "derive-error", "blocking-postgres", "blocking-mysql" , "blocking-sqlite", "migrations-postgres", "migrations-sqlite", "migrations-mysql"] +asynk-postgres = ["asynk-sqlx" , "sqlx?/postgres"] +asynk-sqlite = ["asynk-sqlx" , "sqlx?/sqlite"] +asynk-mysql = ["asynk-sqlx" , "sqlx?/mysql"] +asynk-sqlx = ["asynk" , "dep:sqlx"] 
+asynk = ["dep:tokio", "dep:async-trait", "dep:async-recursion" ]
 derive-error = ["dep:fang-derive-error"]
-postgres = ["diesel?/postgres" , "diesel?/serde_json", "diesel?/chrono" , "diesel?/uuid" , "diesel?/r2d2"]
-sqlite = ["diesel?/sqlite" , "diesel?/serde_json", "diesel?/chrono" , "diesel?/uuid" , "diesel?/r2d2"]
-mysql = ["diesel?/mysql" , "diesel?/serde_json", "diesel?/chrono" , "diesel?/uuid" , "diesel?/r2d2"]
-migrations_postgres = ["migrations"]
-migrations_sqlite = ["migrations"]
-migrations_mysql = ["migrations"]
+blocking = ["dep:diesel", "dep:diesel-derive-enum", "dep:dotenvy", "diesel?/chrono" , "diesel?/serde_json" , "diesel?/uuid", "diesel?/r2d2"]
+blocking-postgres = [ "blocking", "diesel?/postgres"]
+blocking-sqlite = ["blocking", "diesel?/sqlite" ]
+blocking-mysql = [ "blocking", "diesel?/mysql"]
+migrations-postgres = ["migrations"]
+migrations-sqlite = ["migrations"]
+migrations-mysql = ["migrations"]
 migrations = ["dep:diesel_migrations"]

 [dev-dependencies]
 fang-derive-error = { version = "0.1.0"}
 diesel_migrations = { version = "2.1" , features = ["postgres", "sqlite" , "mysql"]}
+sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "json","runtime-tokio-rustls", "postgres", "sqlite", "mysql"]}
+#console-subscriber = "0.2.0" # for tokio tracing debug

 [dependencies]
 cron = "0.12"
@@ -46,6 +52,7 @@ typed-builder = "0.14"
 typetag = "0.2"
 uuid = { version = "1.1", features = ["v4"] }
 fang-derive-error = { version = "0.1.0" , optional = true}
+sqlx = {version = "0.6.3", features = ["any" , "macros" , "chrono", "uuid", "json", "runtime-tokio-rustls"], optional = true}

 [dependencies.diesel]
 version = "2.1"
@@ -61,19 +68,9 @@ optional = true
 version = "0.15"
 optional = true

-[dependencies.bb8-postgres]
-version = "0.8"
-features = ["with-serde_json-1" , "with-uuid-1" , "with-chrono-0_4"]
-optional = true
-
-[dependencies.postgres-types]
-version = "0.X.X"
-features = ["derive"]
-optional = true
-
 [dependencies.tokio]
 version = "1.25"
-features = ["rt", "time", "macros"]
+features = ["rt", "time", "macros"]#, "tracing"]
 optional = true

 [dependencies.async-trait]
@@ -87,4 +84,4 @@ optional = true
 [dependencies.diesel_migrations]
 version = "2.1.0"
 optional = true
-default-features = false
\ No newline at end of file
+default-features = false
diff --git a/fang/README.md b/fang/README.md
index d4b1e092..4314f184 100644
--- a/fang/README.md
+++ b/fang/README.md
@@ -4,7 +4,7 @@

 # Fang

-Background task processing library for Rust. It uses Postgres DB as a task queue.
+Background task processing library for Rust. It can use PostgreSQL, SQLite or MySQL as an asynchronous task queue.
## Key Features

@@ -31,32 +31,62 @@ Here are some of the fang's key features:

 ```toml
 [dependencies]
-fang = { version = "0.10.4" , features = ["blocking"], default-features = false }
+fang = { version = "0.11.0-rc0" , features = ["blocking"], default-features = false }
 ```

 #### the Asynk feature

+- PostgreSQL as a queue
+
+```toml
+[dependencies]
+fang = { version = "0.11.0-rc0" , features = ["asynk-postgres"], default-features = false }
+```
+
+- SQLite as a queue
+
+```toml
+[dependencies]
+fang = { version = "0.11.0-rc0" , features = ["asynk-sqlite"], default-features = false }
+```
+
+- MySQL as a queue
+
 ```toml
 [dependencies]
-fang = { version = "0.10.4" , features = ["asynk"], default-features = false }
+fang = { version = "0.11.0-rc0" , features = ["asynk-mysql"], default-features = false }
 ```

 #### the Asynk feature with derive macro

+Substitute `database` with your desired backend.
+
 ```toml
 [dependencies]
-fang = { version = "0.10.4" , features = ["asynk", "derive-error" ], default-features = false }
+fang = { version = "0.11.0-rc0" , features = ["asynk-{database}", "derive-error" ], default-features = false }
 ```

 #### All features

 ```toml
-fang = { version = "0.10.4" }
+fang = { version = "0.11.0-rc0" }
 ```

-_Supports rustc 1.62+_
+_Supports rustc 1.77+_
+
+1. Create the `fang_tasks` table in the database. The migration for each backend can be found in `fang/{database}_migrations`, where `database` is `postgres`, `mysql` or `sqlite`.
+
+Migrations can also be run from code by enabling the `migrations-{database}` feature, where `database` is the backend you want to use as a queue.
+
+```toml
+[dependencies]
+fang = { version = "0.11.0-rc0" , features = ["asynk-postgres", "migrations-postgres" ], default-features = false }
+```

-2. Create the `fang_tasks` table in the Postgres database. The migration can be found in [the migrations directory](https://github.com/ayrat555/fang/blob/master/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql).
+```rust
+use fang::run_migrations_postgres;
+run_migrations_postgres(&mut connection).unwrap();
+```

 ## Usage
@@ -247,7 +277,6 @@ For Postgres backend:

 ```rust
 use fang::asynk::async_queue::AsyncQueue;
-use fang::NoTls;
 use fang::AsyncRunnable;

 // Create an AsyncQueue
@@ -261,10 +290,10 @@ let mut queue = AsyncQueue::builder()
 .build();

 // Always connect first in order to perform any operation
-queue.connect(NoTls).await.unwrap();
+queue.connect().await.unwrap();
 ```

-As an easy example, we are using NoTls type. If for some reason you would like to encrypt Postgres requests, you can use [openssl](https://docs.rs/postgres-openssl/latest/postgres_openssl/) or [native-tls](https://docs.rs/postgres-native-tls/latest/postgres_native_tls/).
+Connections are always encrypted with the `rustls` crate. We plan to add the option of disabling encryption in the future.
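+
+The same builder and `connect()` call work for every backend: the sqlx driver is chosen from the URI scheme at runtime. As a minimal sketch, connecting to SQLite instead (the file path is illustrative, and the `asynk-sqlite` feature must be enabled):
+
+```rust
+use fang::asynk::async_queue::AsyncQueue;
+
+// The "sqlite://" scheme selects the SQLite driver when connect() is called.
+let mut sqlite_queue = AsyncQueue::builder()
+    .uri("sqlite://fang.db")
+    .max_pool_size(1_u32)
+    .build();
+
+sqlite_queue.connect().await.unwrap();
+```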
```rust
// AsyncTask from the first example
@@ -310,7 +339,7 @@ use fang::asynk::async_worker_pool::AsyncWorkerPool;

 // Need to create a queue
 // Also insert some tasks

-let mut pool: AsyncWorkerPool<AsyncQueue<NoTls>> = AsyncWorkerPool::builder()
+let mut pool: AsyncWorkerPool<AsyncQueue> = AsyncWorkerPool::builder()
   .number_of_workers(max_pool_size)
   .queue(queue.clone())
   // if you want to run tasks of the specific kind
diff --git a/fang/fang_examples/asynk/simple_async_worker/Cargo.toml b/fang/fang_examples/asynk/simple_async_worker/Cargo.toml
index 5e9d2446..54c4a3db 100644
--- a/fang/fang_examples/asynk/simple_async_worker/Cargo.toml
+++ b/fang/fang_examples/asynk/simple_async_worker/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-fang = { path = "../../../" , features = ["asynk", "postgres"]}
+fang = { path = "../../../" , features = ["asynk-postgres", "migrations-postgres"]}
 env_logger = "0.9.0"
 log = "0.4.0"
 dotenvy = "0.15"
diff --git a/fang/fang_examples/asynk/simple_async_worker/src/lib.rs b/fang/fang_examples/asynk/simple_async_worker/src/lib.rs
index cfd269b0..2a203920 100644
--- a/fang/fang_examples/asynk/simple_async_worker/src/lib.rs
+++ b/fang/fang_examples/asynk/simple_async_worker/src/lib.rs
@@ -33,7 +33,7 @@ impl MyFailingTask {
 #[async_trait]
 #[typetag::serde]
 impl AsyncRunnable for MyTask {
-    async fn run(&self, queue: &mut dyn AsyncQueueable) -> Result<(), FangError> {
+    async fn run(&self, queue: &dyn AsyncQueueable) -> Result<(), FangError> {
         let new_task = MyTask::new(self.number + 1);
         queue
             .insert_task(&new_task as &dyn AsyncRunnable)
@@ -50,7 +50,7 @@ impl AsyncRunnable for MyTask {
 #[async_trait]
 #[typetag::serde]
 impl AsyncRunnable for MyFailingTask {
-    async fn run(&self, queue: &mut dyn AsyncQueueable) -> Result<(), FangError> {
+    async fn run(&self, queue: &dyn AsyncQueueable) -> Result<(), FangError> {
         let new_task = MyFailingTask::new(self.number + 1);
         queue
             .insert_task(&new_task as &dyn AsyncRunnable)
diff --git a/fang/fang_examples/asynk/simple_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_async_worker/src/main.rs
index 5a148a5d..fdc7eef1 100644
--- a/fang/fang_examples/asynk/simple_async_worker/src/main.rs
+++ b/fang/fang_examples/asynk/simple_async_worker/src/main.rs
@@ -7,7 +7,6 @@ use fang::asynk::async_queue::AsyncQueueable;
 use fang::asynk::async_worker_pool::AsyncWorkerPool;
 use fang::run_migrations_postgres;
 use fang::AsyncRunnable;
-use fang::NoTls;
 use simple_async_worker::MyFailingTask;
 use simple_async_worker::MyTask;
 use std::env;
@@ -36,10 +35,10 @@ async fn main() {
         .max_pool_size(max_pool_size)
         .build();

-    queue.connect(NoTls).await.unwrap();
+    queue.connect().await.unwrap();
     log::info!("Queue connected...");

-    let mut pool: AsyncWorkerPool<AsyncQueue<NoTls>> = AsyncWorkerPool::builder()
+    let mut pool: AsyncWorkerPool<AsyncQueue> = AsyncWorkerPool::builder()
         .number_of_workers(10_u32)
         .queue(queue.clone())
         .build();
diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml b/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml
index cad5f651..7a6946ba 100644
--- a/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml
+++ b/fang/fang_examples/asynk/simple_cron_async_worker/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-fang = { path = "../../../" , features = ["asynk"]}
+fang = { path = "../../../" , features = ["asynk-postgres"]}
env_logger = "0.9.0"
 log = "0.4.0"
 dotenvy = "0.15"
diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs b/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs
index 2bb972b4..2efc55ad 100644
--- a/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs
+++ b/fang/fang_examples/asynk/simple_cron_async_worker/src/lib.rs
@@ -13,7 +13,7 @@ pub struct MyCronTask {}
 #[async_trait]
 #[typetag::serde]
 impl AsyncRunnable for MyCronTask {
-    async fn run(&self, _queue: &mut dyn AsyncQueueable) -> Result<(), FangError> {
+    async fn run(&self, _queue: &dyn AsyncQueueable) -> Result<(), FangError> {
         log::info!("CRON!!!!!!!!!!!!!!!",);

         Ok(())
diff --git a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs
index e7b7929f..34709be3 100644
--- a/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs
+++ b/fang/fang_examples/asynk/simple_cron_async_worker/src/main.rs
@@ -3,7 +3,6 @@ use fang::asynk::async_queue::AsyncQueue;
 use fang::asynk::async_queue::AsyncQueueable;
 use fang::asynk::async_worker_pool::AsyncWorkerPool;
 use fang::AsyncRunnable;
-use fang::NoTls;
 use simple_cron_async_worker::MyCronTask;
 use std::env;
 use std::time::Duration;
@@ -21,10 +20,10 @@
         .max_pool_size(max_pool_size)
         .build();

-    queue.connect(NoTls).await.unwrap();
+    queue.connect().await.unwrap();
     log::info!("Queue connected...");

-    let mut pool: AsyncWorkerPool<AsyncQueue<NoTls>> = AsyncWorkerPool::builder()
+    let mut pool: AsyncWorkerPool<AsyncQueue> = AsyncWorkerPool::builder()
         .number_of_workers(10_u32)
         .queue(queue.clone())
         .build();
diff --git a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql
index b882b72b..85efb31e 100644
--- a/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql
+++ b/fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql
@@ -3,17 +3,23 @@

 -- docker exec -ti mysql mysql -u root -pfang -P 3360 fang -e "$(catn fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql)"

+ /*
+ Why are `metadata` and `error_message` not TEXT?
+ MySQL's TEXT type is, as far as I can tell, stored as a BLOB,
+ which breaks the FromRow trait implemented in lib.rs line 183.
+ */
+
 CREATE TABLE fang_tasks (
-     id VARCHAR(36) DEFAULT (uuid()) PRIMARY KEY,
+     id BINARY(16) PRIMARY KEY,
      metadata JSON NOT NULL,
      error_message TEXT,
      state ENUM('new', 'in_progress', 'failed', 'finished', 'retried') NOT NULL DEFAULT 'new',
      task_type VARCHAR(255) NOT NULL DEFAULT 'common', -- TEXT type can not have default value, stupid MySQL policy
-     uniq_hash CHAR(64),
+     uniq_hash VARCHAR(64),
      retries INTEGER NOT NULL DEFAULT 0,
-     scheduled_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-     created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-     updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+     scheduled_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+     created_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+     updated_at DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6)
 );

 CREATE INDEX fang_tasks_state_index ON fang_tasks(state);
diff --git a/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql b/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql
index cd4b3544..eee11c74 100644
--- a/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql
+++ b/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql
@@ -7,8 +7,8 @@ CREATE TABLE fang_tasks (
     metadata jsonb NOT NULL,
     error_message TEXT,
     state fang_task_state DEFAULT 'new' NOT NULL,
-    task_type VARCHAR DEFAULT 'common' NOT NULL,
-    uniq_hash CHAR(64),
+    task_type TEXT DEFAULT 'common' NOT NULL,
+    uniq_hash TEXT, -- TEXT rather than CHAR(64), for compatibility with sqlx
     retries INTEGER DEFAULT 0 NOT NULL,
     scheduled_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
     created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
diff --git a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql
index afc60e3e..4c9d6906 100644
--- a/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql
+++ b/fang/sqlite_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql
@@ -1,11 +1,8 @@
 -- Your SQL goes here
-
--- docker exec -ti mysql mysql -u root -pfang -P 3360 fang -e "$(catn fang/mysql_migrations/migrations/2023-08-17-102017_create_fang_tasks/up.sql)"
-
 CREATE TABLE fang_tasks (
-    id TEXT CHECK (LENGTH(id) = 36) NOT NULL PRIMARY KEY, -- UUID generated inside the language
-    -- why uuid is a text ? https://stackoverflow.com/questions/17277735/using-uuids-in-sqlite
+    -- uuid will be stored as a 16 byte BLOB
+    id BLOB NOT NULL PRIMARY KEY, -- UUID generated inside the language
     metadata TEXT NOT NULL,
     -- why metadata is text ? https://stackoverflow.com/questions/16603621/how-to-store-json-object-in-sqlite-database#16603687
     error_message TEXT,
@@ -14,12 +11,13 @@ CREATE TABLE fang_tasks (
     task_type TEXT NOT NULL DEFAULT 'common',
     uniq_hash CHAR(64),
     retries INTEGER NOT NULL DEFAULT 0,
-    -- The datetime() function returns the date and time as text in this formats: YYYY-MM-DD HH:MM:SS.
-    -- https://www.sqlite.org/lang_datefunc.html
-    scheduled_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    -- why timestamps are texts ? https://www.sqlite.org/datatype3.html
-    created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+    -- scheduled_at TEXT NOT NULL DEFAULT (CURRENT_TIMESTAMP || '.000000+00'),

+    -- Timestamps are stored as the number of seconds since the Unix epoch ('1970-01-01 00:00:00 UTC').
+ + scheduled_at INTEGER NOT NULL DEFAULT (unixepoch('now')), + created_at INTEGER NOT NULL DEFAULT (unixepoch('now')), + updated_at INTEGER NOT NULL DEFAULT (unixepoch('now')) ); CREATE INDEX fang_tasks_state_index ON fang_tasks(state); diff --git a/fang/src/asynk.rs b/fang/src/asynk.rs index a75dd036..2a8ab87d 100644 --- a/fang/src/asynk.rs +++ b/fang/src/asynk.rs @@ -2,6 +2,7 @@ pub mod async_queue; pub mod async_runnable; pub mod async_worker; pub mod async_worker_pool; +pub mod backend_sqlx; pub use async_queue::*; pub use async_runnable::AsyncRunnable; diff --git a/fang/src/asynk/async_queue.rs b/fang/src/asynk/async_queue.rs index 67117ec5..c44abb0a 100644 --- a/fang/src/asynk/async_queue.rs +++ b/fang/src/asynk/async_queue.rs @@ -2,58 +2,55 @@ mod async_queue_tests; use crate::asynk::async_runnable::AsyncRunnable; +use crate::backend_sqlx::QueryParams; +use crate::backend_sqlx::SqlXQuery; use crate::CronError; use crate::FangTaskState; use crate::Scheduled::*; use crate::Task; use async_trait::async_trait; -use bb8_postgres::bb8::Pool; -use bb8_postgres::bb8::RunError; -use bb8_postgres::tokio_postgres::row::Row; -use bb8_postgres::tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; -use bb8_postgres::tokio_postgres::Socket; -use bb8_postgres::tokio_postgres::Transaction; -use bb8_postgres::PostgresConnectionManager; + use chrono::DateTime; -use chrono::Duration; use chrono::Utc; use cron::Schedule; -use postgres_types::ToSql; -use sha2::{Digest, Sha256}; +use sqlx::any::AnyConnectOptions; +use sqlx::any::AnyKind; +#[cfg(any( + feature = "asynk-postgres", + feature = "asynk-mysql", + feature = "asynk-sqlite" +))] +use sqlx::pool::PoolOptions; +//use sqlx::any::install_default_drivers; // this is supported in sqlx 0.7 use std::str::FromStr; use thiserror::Error; use typed_builder::TypedBuilder; use uuid::Uuid; -#[cfg(test)] -use bb8_postgres::tokio_postgres::tls::NoTls; +#[cfg(feature = "asynk-postgres")] +use sqlx::PgPool; +#[cfg(feature = "asynk-postgres")] +use sqlx::Postgres; + +#[cfg(feature = "asynk-mysql")] +use sqlx::MySql; +#[cfg(feature = "asynk-mysql")] +use sqlx::MySqlPool; + +#[cfg(feature = "asynk-sqlite")] +use sqlx::Sqlite; +#[cfg(feature = "asynk-sqlite")] +use sqlx::SqlitePool; #[cfg(test)] use self::async_queue_tests::test_asynk_queue; -const INSERT_TASK_QUERY: &str = include_str!("queries/insert_task.sql"); -const INSERT_TASK_UNIQ_QUERY: &str = include_str!("queries/insert_task_uniq.sql"); -const UPDATE_TASK_STATE_QUERY: &str = include_str!("queries/update_task_state.sql"); -const FAIL_TASK_QUERY: &str = include_str!("queries/fail_task.sql"); -const REMOVE_ALL_TASK_QUERY: &str = include_str!("queries/remove_all_tasks.sql"); -const REMOVE_ALL_SCHEDULED_TASK_QUERY: &str = - include_str!("queries/remove_all_scheduled_tasks.sql"); -const REMOVE_TASK_QUERY: &str = include_str!("queries/remove_task.sql"); -const REMOVE_TASK_BY_METADATA_QUERY: &str = include_str!("queries/remove_task_by_metadata.sql"); -const REMOVE_TASKS_TYPE_QUERY: &str = include_str!("queries/remove_tasks_type.sql"); -const FETCH_TASK_TYPE_QUERY: &str = include_str!("queries/fetch_task_type.sql"); -const FIND_TASK_BY_UNIQ_HASH_QUERY: &str = include_str!("queries/find_task_by_uniq_hash.sql"); -const FIND_TASK_BY_ID_QUERY: &str = include_str!("queries/find_task_by_id.sql"); -const RETRY_TASK_QUERY: &str = include_str!("queries/retry_task.sql"); - pub const DEFAULT_TASK_TYPE: &str = "common"; #[derive(Debug, Error)] pub enum AsyncQueueError { #[error(transparent)] - PoolError(#[from] RunError), - 
#[error(transparent)] - PgError(#[from] bb8_postgres::tokio_postgres::Error), + SqlXError(#[from] sqlx::Error), #[error(transparent)] SerdeError(#[from] serde_json::Error), #[error(transparent)] @@ -64,6 +61,8 @@ pub enum AsyncQueueError { "AsyncQueue is not connected :( , call connect() method first and then perform operations" )] NotConnectedError, + #[error("AsyncQueue generic does not correspond to uri BackendSqlX")] + ConnectionError, #[error("Can not convert `std::time::Duration` to `chrono::Duration`")] TimeError, #[error("Can not perform this operation if task is not uniq, please check its definition in impl AsyncRunnable")] @@ -77,65 +76,60 @@ impl From for AsyncQueueError { } /// This trait defines operations for an asynchronous queue. -/// The trait can be implemented for different storage backends. -/// For now, the trait is only implemented for PostgreSQL. More backends are planned to be implemented in the future. +/// This is implemented by the `AsyncQueue` struct which uses internally a `AnyPool` of `sqlx` to connect to the database. #[async_trait] -pub trait AsyncQueueable: Send { +pub trait AsyncQueueable: Send + Sync { /// This method should retrieve one task of the `task_type` type. If `task_type` is `None` it will try to /// fetch a task of the type `common`. After fetching it should update the state of the task to /// `FangTaskState::InProgress`. /// async fn fetch_and_touch_task( - &mut self, + &self, task_type: Option, ) -> Result, AsyncQueueError>; /// Enqueue a task to the queue, The task will be executed as soon as possible by the worker of the same type /// created by an AsyncWorkerPool. - async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result; + async fn insert_task(&self, task: &dyn AsyncRunnable) -> Result; /// The method will remove all tasks from the queue - async fn remove_all_tasks(&mut self) -> Result; + async fn remove_all_tasks(&self) -> Result; /// Remove all tasks that are scheduled in the future. - async fn remove_all_scheduled_tasks(&mut self) -> Result; + async fn remove_all_scheduled_tasks(&self) -> Result; /// Remove a task by its id. - async fn remove_task(&mut self, id: Uuid) -> Result; + async fn remove_task(&self, id: &Uuid) -> Result; /// Remove a task by its metadata (struct fields values) async fn remove_task_by_metadata( - &mut self, + &self, task: &dyn AsyncRunnable, ) -> Result; /// Removes all tasks that have the specified `task_type`. - async fn remove_tasks_type(&mut self, task_type: &str) -> Result; + async fn remove_tasks_type(&self, task_type: &str) -> Result; /// Retrieve a task from storage by its `id`. - async fn find_task_by_id(&mut self, id: Uuid) -> Result; + async fn find_task_by_id(&self, id: &Uuid) -> Result; /// Update the state field of the specified task /// See the `FangTaskState` enum for possible states. async fn update_task_state( - &mut self, + &self, task: &Task, state: FangTaskState, ) -> Result; /// Update the state of a task to `FangTaskState::Failed` and set an error_message. - async fn fail_task( - &mut self, - task: &Task, - error_message: &str, - ) -> Result; + async fn fail_task(&self, task: &Task, error_message: &str) -> Result; /// Schedule a task. 
- async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result; + async fn schedule_task(&self, task: &dyn AsyncRunnable) -> Result; async fn schedule_retry( - &mut self, + &self, task: &Task, backoff_seconds: u32, error: &str, @@ -154,17 +148,64 @@ pub trait AsyncQueueable: Send { /// .build(); /// ``` /// +/// + +#[derive(Debug, Clone)] +pub(crate) enum InternalPool { + #[cfg(feature = "asynk-postgres")] + Pg(PgPool), + #[cfg(feature = "asynk-mysql")] + MySql(MySqlPool), + #[cfg(feature = "asynk-sqlite")] + Sqlite(SqlitePool), + NoBackend, +} + +impl InternalPool { + #[cfg(feature = "asynk-postgres")] + pub(crate) fn unwrap_pg_pool(&self) -> &PgPool { + match self { + InternalPool::Pg(pool) => pool, + #[allow(unreachable_patterns)] + _ => panic!("Not a PgPool!"), + } + } + + #[cfg(feature = "asynk-mysql")] + pub(crate) fn unwrap_mysql_pool(&self) -> &MySqlPool { + match self { + InternalPool::MySql(pool) => pool, + #[allow(unreachable_patterns)] + _ => panic!("Not a MySqlPool!"), + } + } + + #[cfg(feature = "asynk-sqlite")] + pub(crate) fn unwrap_sqlite_pool(&self) -> &SqlitePool { + match self { + InternalPool::Sqlite(pool) => pool, + #[allow(unreachable_patterns)] + _ => panic!("Not a SqlitePool!"), + } + } + + pub(crate) fn backend(&self) -> Result { + match *self { + #[cfg(feature = "asynk-postgres")] + InternalPool::Pg(_) => Ok(BackendSqlX::Pg), + #[cfg(feature = "asynk-mysql")] + InternalPool::MySql(_) => Ok(BackendSqlX::MySql), + #[cfg(feature = "asynk-sqlite")] + InternalPool::Sqlite(_) => Ok(BackendSqlX::Sqlite), + InternalPool::NoBackend => Err(AsyncQueueError::NotConnectedError), + } + } +} #[derive(TypedBuilder, Debug, Clone)] -pub struct AsyncQueue -where - Tls: MakeTlsConnect + Clone + Send + Sync + 'static, - >::Stream: Send + Sync, - >::TlsConnect: Send, - <>::TlsConnect as TlsConnect>::Future: Send, -{ - #[builder(default=None, setter(skip))] - pool: Option>>, +pub struct AsyncQueue { + #[builder(default=InternalPool::NoBackend, setter(skip))] + pool: InternalPool, #[builder(setter(into))] uri: String, #[builder(setter(into))] @@ -176,62 +217,65 @@ where #[cfg(test)] use tokio::sync::Mutex; -#[cfg(test)] -static ASYNC_QUEUE_DB_TEST_COUNTER: Mutex = Mutex::const_new(0); - -#[cfg(test)] -impl AsyncQueue { - /// Provides an AsyncQueue connected to its own DB - pub async fn test() -> Self { - const BASE_URI: &str = "postgres://postgres:postgres@localhost"; - let mut res = Self::builder() - .max_pool_size(1_u32) - .uri(format!("{}/fang", BASE_URI)) - .build(); - - let mut new_number = ASYNC_QUEUE_DB_TEST_COUNTER.lock().await; - res.connect(NoTls).await.unwrap(); +#[cfg(all(test, feature = "asynk-postgres"))] +static ASYNC_QUEUE_POSTGRES_TEST_COUNTER: Mutex = Mutex::const_new(0); - let db_name = format!("async_queue_test_{}", *new_number); - *new_number += 1; +#[cfg(all(test, feature = "asynk-sqlite"))] +static ASYNC_QUEUE_SQLITE_TEST_COUNTER: Mutex = Mutex::const_new(0); - let create_query = format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); - let delete_query = format!("DROP DATABASE IF EXISTS {};", db_name); +#[cfg(all(test, feature = "asynk-mysql"))] +static ASYNC_QUEUE_MYSQL_TEST_COUNTER: Mutex = Mutex::const_new(0); - let conn = res.pool.as_mut().unwrap().get().await.unwrap(); +#[cfg(test)] +use sqlx::Executor; - log::info!("Deleting database {db_name} ..."); - conn.execute(&delete_query, &[]).await.unwrap(); +#[cfg(all(test, feature = "asynk-sqlite"))] +use std::path::Path; - log::info!("Creating database {db_name} ..."); - while let Err(e) = 
conn.execute(&create_query, &[]).await { - if e.as_db_error().unwrap().message() - != "source database \"fang\" is being accessed by other users" - { - panic!("{:?}", e); - } +#[cfg(test)] +use std::env; + +use super::backend_sqlx::BackendSqlX; + +async fn get_pool( + kind: AnyKind, + _uri: &str, + _max_connections: u32, +) -> Result { + match kind { + #[cfg(feature = "asynk-postgres")] + AnyKind::Postgres => { + let pool = PoolOptions::::new() + .max_connections(_max_connections) + .connect(_uri) + .await?; + + Ok(InternalPool::Pg(pool)) } - - log::info!("Database {db_name} created !!"); - - drop(conn); - - res.connected = false; - res.pool = None; - res.uri = format!("{}/{}", BASE_URI, db_name); - res.connect(NoTls).await.unwrap(); - - res + #[cfg(feature = "asynk-mysql")] + AnyKind::MySql => { + let pool = PoolOptions::::new() + .max_connections(_max_connections) + .connect(_uri) + .await?; + + Ok(InternalPool::MySql(pool)) + } + #[cfg(feature = "asynk-sqlite")] + AnyKind::Sqlite => { + let pool = PoolOptions::::new() + .max_connections(_max_connections) + .connect(_uri) + .await?; + + Ok(InternalPool::Sqlite(pool)) + } + #[allow(unreachable_patterns)] + _ => Err(AsyncQueueError::ConnectionError), } } -impl AsyncQueue -where - Tls: MakeTlsConnect + Clone + Send + Sync + 'static, - >::Stream: Send + Sync, - >::TlsConnect: Send, - <>::TlsConnect as TlsConnect>::Future: Send, -{ +impl AsyncQueue { /// Check if the connection with db is established pub fn check_if_connection(&self) -> Result<(), AsyncQueueError> { if self.connected { @@ -242,122 +286,21 @@ where } /// Connect to the db if not connected - pub async fn connect(&mut self, tls: Tls) -> Result<(), AsyncQueueError> { - let manager = PostgresConnectionManager::new_from_stringlike(self.uri.clone(), tls)?; + pub async fn connect(&mut self) -> Result<(), AsyncQueueError> { + //install_default_drivers(); + + let kind: AnyKind = self.uri.parse::()?.kind(); - let pool = Pool::builder() - .max_size(self.max_pool_size) - .build(manager) - .await?; + let pool = get_pool(kind, &self.uri, self.max_pool_size).await?; - self.pool = Some(pool); + self.pool = pool; self.connected = true; Ok(()) } - async fn remove_all_tasks_query( - transaction: &mut Transaction<'_>, - ) -> Result { - Self::execute_query(transaction, REMOVE_ALL_TASK_QUERY, &[], None).await - } - - async fn remove_all_scheduled_tasks_query( - transaction: &mut Transaction<'_>, - ) -> Result { - Self::execute_query( - transaction, - REMOVE_ALL_SCHEDULED_TASK_QUERY, - &[&Utc::now()], - None, - ) - .await - } - - async fn remove_task_query( - transaction: &mut Transaction<'_>, - id: Uuid, - ) -> Result { - Self::execute_query(transaction, REMOVE_TASK_QUERY, &[&id], Some(1)).await - } - - async fn remove_task_by_metadata_query( - transaction: &mut Transaction<'_>, - task: &dyn AsyncRunnable, - ) -> Result { - let metadata = serde_json::to_value(task)?; - - let uniq_hash = Self::calculate_hash(metadata.to_string()); - - Self::execute_query( - transaction, - REMOVE_TASK_BY_METADATA_QUERY, - &[&uniq_hash], - None, - ) - .await - } - - async fn remove_tasks_type_query( - transaction: &mut Transaction<'_>, - task_type: &str, - ) -> Result { - Self::execute_query(transaction, REMOVE_TASKS_TYPE_QUERY, &[&task_type], None).await - } - - async fn find_task_by_id_query( - transaction: &mut Transaction<'_>, - id: Uuid, - ) -> Result { - let row: Row = transaction.query_one(FIND_TASK_BY_ID_QUERY, &[&id]).await?; - - let task = Self::row_to_task(row); - Ok(task) - } - - async fn fail_task_query( 
- transaction: &mut Transaction<'_>, - task: &Task, - error_message: &str, - ) -> Result { - let updated_at = Utc::now(); - - let row: Row = transaction - .query_one( - FAIL_TASK_QUERY, - &[ - &FangTaskState::Failed, - &error_message, - &updated_at, - &task.id, - ], - ) - .await?; - let failed_task = Self::row_to_task(row); - Ok(failed_task) - } - - async fn schedule_retry_query( - transaction: &mut Transaction<'_>, - task: &Task, - backoff_seconds: u32, - error: &str, - ) -> Result { - let now = Utc::now(); - let scheduled_at = now + Duration::seconds(backoff_seconds as i64); - let retries = task.retries + 1; - - let row: Row = transaction - .query_one( - RETRY_TASK_QUERY, - &[&error, &retries, &scheduled_at, &now, &task.id], - ) - .await?; - let failed_task = Self::row_to_task(row); - Ok(failed_task) - } - async fn fetch_and_touch_task_query( - transaction: &mut Transaction<'_>, + pool: &InternalPool, + backend: &BackendSqlX, task_type: Option, ) -> Result, AsyncQueueError> { let task_type = match task_type { @@ -365,166 +308,77 @@ where None => DEFAULT_TASK_TYPE.to_string(), }; - let task = match Self::get_task_type_query(transaction, &task_type).await { - Ok(some_task) => Some(some_task), - Err(_) => None, - }; + let query_params = QueryParams::builder().task_type(&task_type).build(); + + let task = backend + .execute_query(SqlXQuery::FetchTaskType, pool, query_params) + .await + .map(|val| val.unwrap_task()) + .ok(); + let result_task = if let Some(some_task) = task { - Some( - Self::update_task_state_query(transaction, &some_task, FangTaskState::InProgress) - .await?, - ) + let query_params = QueryParams::builder() + .uuid(&some_task.id) + .state(FangTaskState::InProgress) + .build(); + + let task = backend + .execute_query(SqlXQuery::UpdateTaskState, pool, query_params) + .await? + .unwrap_task(); + + Some(task) } else { None }; Ok(result_task) } - async fn get_task_type_query( - transaction: &mut Transaction<'_>, - task_type: &str, - ) -> Result { - let row: Row = transaction - .query_one(FETCH_TASK_TYPE_QUERY, &[&task_type, &Utc::now()]) - .await?; - - let task = Self::row_to_task(row); - - Ok(task) - } - - async fn update_task_state_query( - transaction: &mut Transaction<'_>, - task: &Task, - state: FangTaskState, - ) -> Result { - let updated_at = Utc::now(); - - let row: Row = transaction - .query_one(UPDATE_TASK_STATE_QUERY, &[&state, &updated_at, &task.id]) - .await?; - let task = Self::row_to_task(row); - Ok(task) - } - async fn insert_task_query( - transaction: &mut Transaction<'_>, - metadata: serde_json::Value, - task_type: &str, - scheduled_at: DateTime, - ) -> Result { - let row: Row = transaction - .query_one(INSERT_TASK_QUERY, &[&metadata, &task_type, &scheduled_at]) - .await?; - let task = Self::row_to_task(row); - Ok(task) - } - - async fn insert_task_uniq_query( - transaction: &mut Transaction<'_>, - metadata: serde_json::Value, + pool: &InternalPool, + backend: &BackendSqlX, + metadata: &serde_json::Value, task_type: &str, - scheduled_at: DateTime, + scheduled_at: &DateTime, ) -> Result { - let uniq_hash = Self::calculate_hash(metadata.to_string()); + let query_params = QueryParams::builder() + .metadata(metadata) + .task_type(task_type) + .scheduled_at(scheduled_at) + .build(); - let row: Row = transaction - .query_one( - INSERT_TASK_UNIQ_QUERY, - &[&metadata, &task_type, &uniq_hash, &scheduled_at], - ) - .await?; + let task = backend + .execute_query(SqlXQuery::InsertTask, pool, query_params) + .await? 
+ .unwrap_task(); - let task = Self::row_to_task(row); Ok(task) } - async fn execute_query( - transaction: &mut Transaction<'_>, - query: &str, - params: &[&(dyn ToSql + Sync)], - expected_result_count: Option, - ) -> Result { - let result = transaction.execute(query, params).await?; - - if let Some(expected_result) = expected_result_count { - if result != expected_result { - return Err(AsyncQueueError::ResultError { - expected: expected_result, - found: result, - }); - } - } - Ok(result) - } - async fn insert_task_if_not_exist_query( - transaction: &mut Transaction<'_>, - metadata: serde_json::Value, + pool: &InternalPool, + backend: &BackendSqlX, + metadata: &serde_json::Value, task_type: &str, - scheduled_at: DateTime, + scheduled_at: &DateTime, ) -> Result { - match Self::find_task_by_uniq_hash_query(transaction, &metadata).await { - Some(task) => Ok(task), - None => { - Self::insert_task_uniq_query(transaction, metadata, task_type, scheduled_at).await - } - } - } - - fn calculate_hash(json: String) -> String { - let mut hasher = Sha256::new(); - hasher.update(json.as_bytes()); - let result = hasher.finalize(); - hex::encode(result) - } - - async fn find_task_by_uniq_hash_query( - transaction: &mut Transaction<'_>, - metadata: &serde_json::Value, - ) -> Option { - let uniq_hash = Self::calculate_hash(metadata.to_string()); - - let result = transaction - .query_one(FIND_TASK_BY_UNIQ_HASH_QUERY, &[&uniq_hash]) - .await; - - match result { - Ok(row) => Some(Self::row_to_task(row)), - Err(_) => None, - } - } - - fn row_to_task(row: Row) -> Task { - let id: Uuid = row.get("id"); - let metadata: serde_json::Value = row.get("metadata"); - - let error_message: Option = row.try_get("error_message").ok(); - - let uniq_hash: Option = row.try_get("uniq_hash").ok(); - let state: FangTaskState = row.get("state"); - let task_type: String = row.get("task_type"); - let retries: i32 = row.get("retries"); - let created_at: DateTime = row.get("created_at"); - let updated_at: DateTime = row.get("updated_at"); - let scheduled_at: DateTime = row.get("scheduled_at"); - - Task::builder() - .id(id) + let query_params = QueryParams::builder() .metadata(metadata) - .error_message(error_message) - .state(state) - .uniq_hash(uniq_hash) .task_type(task_type) - .retries(retries) - .created_at(created_at) - .updated_at(updated_at) .scheduled_at(scheduled_at) - .build() + .build(); + + let task = backend + .execute_query(SqlXQuery::InsertTaskIfNotExists, pool, query_params) + .await? + .unwrap_task(); + + Ok(task) } async fn schedule_task_query( - transaction: &mut Transaction<'_>, + pool: &InternalPool, + backend: &BackendSqlX, task: &dyn AsyncRunnable, ) -> Result { let metadata = serde_json::to_value(task)?; @@ -548,13 +402,15 @@ where }; let task: Task = if !task.uniq() { - Self::insert_task_query(transaction, metadata, &task.task_type(), scheduled_at).await? + Self::insert_task_query(pool, backend, &metadata, &task.task_type(), &scheduled_at) + .await? } else { Self::insert_task_if_not_exist_query( - transaction, - metadata, + pool, + backend, + &metadata, &task.task_type(), - scheduled_at, + &scheduled_at, ) .await? 
}; @@ -563,124 +419,137 @@ where } #[async_trait] -impl AsyncQueueable for AsyncQueue -where - Tls: MakeTlsConnect + Clone + Send + Sync + 'static, - >::Stream: Send + Sync, - >::TlsConnect: Send, - <>::TlsConnect as TlsConnect>::Future: Send, -{ - async fn find_task_by_id(&mut self, id: Uuid) -> Result { +impl AsyncQueueable for AsyncQueue { + async fn find_task_by_id(&self, id: &Uuid) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let pool = &self.pool; - let task = Self::find_task_by_id_query(&mut transaction, id).await?; + let backend = pool.backend()?; - transaction.commit().await?; + let query_params = QueryParams::builder().uuid(id).build(); + + let task = backend + .execute_query(SqlXQuery::FindTaskById, pool, query_params) + .await? + .unwrap_task(); Ok(task) } async fn fetch_and_touch_task( - &mut self, + &self, task_type: Option, ) -> Result, AsyncQueueError> { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + // this unwrap is safe because we check if connection is established + let pool = &self.pool; - let task = Self::fetch_and_touch_task_query(&mut transaction, task_type).await?; + let backend = pool.backend()?; - transaction.commit().await?; + let task = Self::fetch_and_touch_task_query(pool, &backend, task_type).await?; Ok(task) } - async fn insert_task(&mut self, task: &dyn AsyncRunnable) -> Result { + async fn insert_task(&self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + // this unwrap is safe because we check if connection is established + let pool = &self.pool; + let backend = pool.backend()?; let metadata = serde_json::to_value(task)?; - let task: Task = if !task.uniq() { - Self::insert_task_query(&mut transaction, metadata, &task.task_type(), Utc::now()) + let task = if !task.uniq() { + Self::insert_task_query(pool, &backend, &metadata, &task.task_type(), &Utc::now()) .await? } else { Self::insert_task_if_not_exist_query( - &mut transaction, - metadata, + pool, + &backend, + &metadata, &task.task_type(), - Utc::now(), + &Utc::now(), ) .await? 
}; - transaction.commit().await?; - Ok(task) } - async fn schedule_task(&mut self, task: &dyn AsyncRunnable) -> Result { + async fn schedule_task(&self, task: &dyn AsyncRunnable) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + // this unwrap is safe because we check if connection is established + let pool = &self.pool; + let backend = pool.backend()?; - let task = Self::schedule_task_query(&mut transaction, task).await?; + let task = Self::schedule_task_query(pool, &backend, task).await?; - transaction.commit().await?; Ok(task) } - async fn remove_all_tasks(&mut self) -> Result { + async fn remove_all_tasks(&self) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + // this unwrap is safe because we check if connection is established + let pool = &self.pool; + let backend = pool.backend()?; - let result = Self::remove_all_tasks_query(&mut transaction).await?; + let query_params = QueryParams::builder().build(); - transaction.commit().await?; + let result = backend + .execute_query(SqlXQuery::RemoveAllTask, pool, query_params) + .await? + .unwrap_u64(); Ok(result) } - async fn remove_all_scheduled_tasks(&mut self) -> Result { + async fn remove_all_scheduled_tasks(&self) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + // this unwrap is safe because we check if connection is established + let pool = &self.pool; + + let backend = pool.backend()?; - let result = Self::remove_all_scheduled_tasks_query(&mut transaction).await?; + let query_params = QueryParams::builder().build(); - transaction.commit().await?; + let result = backend + .execute_query(SqlXQuery::RemoveAllScheduledTask, pool, query_params) + .await? + .unwrap_u64(); Ok(result) } - async fn remove_task(&mut self, id: Uuid) -> Result { + async fn remove_task(&self, id: &Uuid) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let pool = &self.pool; + let backend = pool.backend()?; - let result = Self::remove_task_query(&mut transaction, id).await?; + let query_params = QueryParams::builder().uuid(id).build(); - transaction.commit().await?; + let result = backend + .execute_query(SqlXQuery::RemoveTask, pool, query_params) + .await? + .unwrap_u64(); Ok(result) } async fn remove_task_by_metadata( - &mut self, + &self, task: &dyn AsyncRunnable, ) -> Result { if task.uniq() { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let pool = &self.pool; + let backend = pool.backend()?; - let result = Self::remove_task_by_metadata_query(&mut transaction, task).await?; + let query_params = QueryParams::builder().runnable(task).build(); - transaction.commit().await?; + let result = backend + .execute_query(SqlXQuery::RemoveTaskByMetadata, pool, query_params) + .await? 
+ .unwrap_u64(); Ok(result) } else { @@ -688,65 +557,226 @@ where } } - async fn remove_tasks_type(&mut self, task_type: &str) -> Result { + async fn remove_tasks_type(&self, task_type: &str) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let pool = &self.pool; + let backend = pool.backend()?; - let result = Self::remove_tasks_type_query(&mut transaction, task_type).await?; + let query_params = QueryParams::builder().task_type(task_type).build(); - transaction.commit().await?; + let result = backend + .execute_query(SqlXQuery::RemoveTaskType, pool, query_params) + .await? + .unwrap_u64(); Ok(result) } async fn update_task_state( - &mut self, + &self, task: &Task, state: FangTaskState, ) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let pool = &self.pool; + let backend = pool.backend()?; - let task = Self::update_task_state_query(&mut transaction, task, state).await?; - transaction.commit().await?; + let query_params = QueryParams::builder().uuid(&task.id).state(state).build(); + + let task = backend + .execute_query(SqlXQuery::UpdateTaskState, pool, query_params) + .await? + .unwrap_task(); Ok(task) } - async fn fail_task( - &mut self, - task: &Task, - error_message: &str, - ) -> Result { + async fn fail_task(&self, task: &Task, error_message: &str) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; + let pool = &self.pool; + let backend = pool.backend()?; + + let query_params = QueryParams::builder() + .error_message(error_message) + .task(task) + .build(); - let task = Self::fail_task_query(&mut transaction, task, error_message).await?; - transaction.commit().await?; + let failed_task = backend + .execute_query(SqlXQuery::FailTask, pool, query_params) + .await? + .unwrap_task(); - Ok(task) + Ok(failed_task) } async fn schedule_retry( - &mut self, + &self, task: &Task, backoff_seconds: u32, error: &str, ) -> Result { self.check_if_connection()?; - let mut connection = self.pool.as_ref().unwrap().get().await?; - let mut transaction = connection.transaction().await?; - let task = - Self::schedule_retry_query(&mut transaction, task, backoff_seconds, error).await?; - transaction.commit().await?; + let pool = &self.pool; + let backend = pool.backend()?; - Ok(task) + let query_params = QueryParams::builder() + .backoff_seconds(backoff_seconds) + .error_message(error) + .task(task) + .build(); + + let failed_task = backend + .execute_query(SqlXQuery::RetryTask, pool, query_params) + .await? + .unwrap_task(); + + Ok(failed_task) } } -#[cfg(test)] -test_asynk_queue! 
{postgres, crate::AsyncQueue, crate::AsyncQueue::::test()} +#[cfg(all(test, feature = "asynk-postgres"))] +impl AsyncQueue { + /// Provides an AsyncQueue connected to its own DB + pub async fn test_postgres() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let base_url = env::var("POSTGRES_BASE_URL").expect("Base URL for Postgres not found"); + let base_db = env::var("POSTGRES_DB").expect("Name for base Postgres DB not found"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("{}/{}", base_url, base_db)) + .build(); + + let mut new_number = ASYNC_QUEUE_POSTGRES_TEST_COUNTER.lock().await; + res.connect().await.unwrap(); + + let db_name = format!("async_queue_test_{}", *new_number); + *new_number += 1; + + let create_query: &str = &format!("CREATE DATABASE {} WITH TEMPLATE fang;", db_name); + let delete_query: &str = &format!("DROP DATABASE IF EXISTS {};", db_name); + + let mut conn = res.pool.unwrap_pg_pool().acquire().await.unwrap(); + + log::info!("Deleting database {db_name} ..."); + conn.execute(delete_query).await.unwrap(); + + log::info!("Creating database {db_name} ..."); + let expected_error: &str = &format!( + "source database \"{}\" is being accessed by other users", + base_db + ); + while let Err(e) = conn.execute(create_query).await { + if e.as_database_error().unwrap().message() != expected_error { + panic!("{:?}", e); + } + } + + log::info!("Database {db_name} created !!"); + + res.connected = false; + res.pool = InternalPool::NoBackend; + res.uri = format!("{}/{}", base_url, db_name); + res.connect().await.unwrap(); + + res + } +} + +#[cfg(all(test, feature = "asynk-sqlite"))] +impl AsyncQueue { + /// Provides an AsyncQueue connected to its own DB + pub async fn test_sqlite() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let tests_dir = env::var("SQLITE_TESTS_DIR").expect("Name for tests directory not found"); + let base_file = env::var("SQLITE_FILE").expect("Name for SQLite DB file not found"); + let sqlite_file = format!("../{}", base_file); + + let mut new_number = ASYNC_QUEUE_SQLITE_TEST_COUNTER.lock().await; + + let db_name = format!("../{}/async_queue_test_{}.db", tests_dir, *new_number); + *new_number += 1; + + let path = Path::new(&db_name); + + if path.exists() { + log::info!("Deleting database {db_name} ..."); + std::fs::remove_file(path).unwrap(); + } + + log::info!("Creating database {db_name} ..."); + std::fs::copy(sqlite_file, &db_name).unwrap(); + log::info!("Database {db_name} created !!"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("sqlite://{}", db_name)) + .build(); + + res.connect().await.expect("fail to connect"); + res + } +} + +#[cfg(all(test, feature = "asynk-mysql"))] +impl AsyncQueue { + /// Provides an AsyncQueue connected to its own DB + pub async fn test_mysql() -> Self { + dotenvy::dotenv().expect(".env file not found"); + let base_url = env::var("MYSQL_BASE_URL").expect("Base URL for MySQL not found"); + let base_db = env::var("MYSQL_DB").expect("Name for base MySQL DB not found"); + + let mut res = Self::builder() + .max_pool_size(1_u32) + .uri(format!("{}/{}", base_url, base_db)) + .build(); + + let mut new_number = ASYNC_QUEUE_MYSQL_TEST_COUNTER.lock().await; + res.connect().await.unwrap(); + + let db_name = format!("async_queue_test_{}", *new_number); + *new_number += 1; + + let create_query: &str = &format!( + "CREATE DATABASE {}; CREATE TABLE {}.fang_tasks LIKE fang.fang_tasks;", + db_name, db_name + ); + + let delete_query: &str = &format!("DROP DATABASE 
IF EXISTS {};", db_name); + + let mut conn = res.pool.unwrap_mysql_pool().acquire().await.unwrap(); + + log::info!("Deleting database {db_name} ..."); + conn.execute(delete_query).await.unwrap(); + + log::info!("Creating database {db_name} ..."); + let expected_error: &str = &format!( + "source database \"{}\" is being accessed by other users", + base_db + ); + while let Err(e) = conn.execute(create_query).await { + if e.as_database_error().unwrap().message() != expected_error { + panic!("{:?}", e); + } + } + + log::info!("Database {db_name} created !!"); + + res.connected = false; + res.pool = InternalPool::NoBackend; + res.uri = format!("{}/{}", base_url, db_name); + res.connect().await.unwrap(); + + res + } +} + +#[cfg(all(test, feature = "asynk-postgres"))] +test_asynk_queue! {postgres, crate::AsyncQueue,crate::AsyncQueue::test_postgres()} + +#[cfg(all(test, feature = "asynk-sqlite"))] +test_asynk_queue! {sqlite, crate::AsyncQueue,crate::AsyncQueue::test_sqlite()} + +#[cfg(all(test, feature = "asynk-mysql"))] +test_asynk_queue! {mysql, crate::AsyncQueue, crate::AsyncQueue::test_mysql()} diff --git a/fang/src/asynk/async_queue/async_queue_tests.rs b/fang/src/asynk/async_queue/async_queue_tests.rs index f6ea2ece..3f60a738 100644 --- a/fang/src/asynk/async_queue/async_queue_tests.rs +++ b/fang/src/asynk/async_queue/async_queue_tests.rs @@ -15,7 +15,7 @@ pub(crate) struct AsyncTask { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } } @@ -28,7 +28,7 @@ pub(crate) struct AsyncUniqTask { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncUniqTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -46,7 +46,7 @@ pub(crate) struct AsyncTaskSchedule { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTaskSchedule { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -77,7 +77,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn insert_task_creates_new_task() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -91,7 +91,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn update_task_state_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -113,8 +113,8 @@ macro_rules! test_asynk_queue { } #[tokio::test] - async fn failed_task_query_test() { - let mut test: $q = $e.await; + async fn failed_task_test() { + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -135,7 +135,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_all_tasks_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -161,7 +161,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn schedule_task_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let datetime = (Utc::now() + Duration::seconds(7)).round_subsecs(0); @@ -183,7 +183,7 @@ macro_rules! 
test_asynk_queue { #[tokio::test] async fn remove_all_scheduled_tasks_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let datetime = (Utc::now() + Duration::seconds(7)).round_subsecs(0); @@ -207,7 +207,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn fetch_and_touch_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -247,7 +247,7 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_tasks_type_test() { - let mut test: $q = $e.await; + let test: $q = $e.await; let task = test.insert_task(&AsyncTask { number: 1 }).await.unwrap(); @@ -261,6 +261,7 @@ macro_rules! test_asynk_queue { let task = test.insert_task(&AsyncTask { number: 2 }).await.unwrap(); let metadata = task.metadata.as_object().unwrap(); + let number = metadata["number"].as_u64(); let type_task = metadata["type"].as_str(); @@ -268,6 +269,7 @@ macro_rules! test_asynk_queue { assert_eq!(Some("AsyncTask"), type_task); let result = test.remove_tasks_type("mytype").await.unwrap(); + assert_eq!(0, result); let result = test.remove_tasks_type("common").await.unwrap(); @@ -276,7 +278,8 @@ macro_rules! test_asynk_queue { #[tokio::test] async fn remove_tasks_by_metadata() { - let mut test: $q = $e.await; + //console_subscriber::init(); + let test: $q = $e.await; let task = test .insert_task(&AsyncUniqTask { number: 1 }) diff --git a/fang/src/asynk/async_runnable.rs b/fang/src/asynk/async_runnable.rs index bde2bed3..bc0852b3 100644 --- a/fang/src/asynk/async_runnable.rs +++ b/fang/src/asynk/async_runnable.rs @@ -3,9 +3,8 @@ use crate::asynk::async_queue::AsyncQueueable; use crate::FangError; use crate::Scheduled; use async_trait::async_trait; -use bb8_postgres::bb8::RunError; -use bb8_postgres::tokio_postgres::Error as TokioPostgresError; use serde_json::Error as SerdeError; +use sqlx::Error as SqlXError; const COMMON_TYPE: &str = "common"; pub const RETRIES_NUMBER: i32 = 20; @@ -19,18 +18,11 @@ impl From<AsyncQueueError> for FangError { } } -impl From<TokioPostgresError> for FangError { - fn from(error: TokioPostgresError) -> Self { - Self::from(AsyncQueueError::PgError(error)) +impl From<SqlXError> for FangError { + fn from(error: SqlXError) -> Self { + Self::from(AsyncQueueError::SqlXError(error)) } } - -impl From<RunError<TokioPostgresError>> for FangError { - fn from(error: RunError<TokioPostgresError>) -> Self { - Self::from(AsyncQueueError::PoolError(error)) - } -} - impl From<SerdeError> for FangError { fn from(error: SerdeError) -> Self { Self::from(AsyncQueueError::SerdeError(error)) @@ -42,7 +34,7 @@ impl From<SerdeError> for FangError #[async_trait] pub trait AsyncRunnable: Send + Sync { /// Execute the task. This method should define its logic - async fn run(&self, client: &mut dyn AsyncQueueable) -> Result<(), FangError>; + async fn run(&self, client: &dyn AsyncQueueable) -> Result<(), FangError>; /// Define the type of the task. /// The `common` task type is used by default
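For orientation, this is what a task looks like against the new trait signature — a sketch assuming the usual fang re-exports and that AsyncQueueable::insert_task still returns Result<Task, AsyncQueueError>, which the From impl above converts for the ? operator:

    use async_trait::async_trait;
    use fang::{AsyncQueueable, AsyncRunnable, FangError};
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct CountTask {
        number: u16,
    }

    #[typetag::serde]
    #[async_trait]
    impl AsyncRunnable for CountTask {
        // The queue now arrives as `&dyn AsyncQueueable` rather than `&mut`,
        // so a task can enqueue follow-up work through a shared reference.
        async fn run(&self, queueable: &dyn AsyncQueueable) -> Result<(), FangError> {
            queueable
                .insert_task(&CountTask { number: self.number + 1 })
                .await?;
            Ok(())
        }
    }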
diff --git a/fang/src/asynk/async_worker.rs b/fang/src/asynk/async_worker.rs index 79698546..e9f1360f 100644 --- a/fang/src/asynk/async_worker.rs +++ b/fang/src/asynk/async_worker.rs @@ -29,8 +29,8 @@ impl<AQueue> AsyncWorker<AQueue> where AQueue: AsyncQueueable + Clone + Sync + 'static, { - async fn run(&mut self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { - let result = runnable.run(&mut self.queue).await; + async fn run(&self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { + let result = runnable.run(&self.queue).await; match result { Ok(_) => self.finalize_task(task, &result).await?, @@ -52,7 +52,7 @@ where } async fn finalize_task( - &mut self, + &self, task: &Task, result: &Result<(), FangError>, ) -> Result<(), FangError> { @@ -68,11 +68,11 @@ where } }, RetentionMode::RemoveAll => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } RetentionMode::RemoveFinished => match result { Ok(_) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } Err(error) => { self.queue.fail_task(task, &error.description).await?; @@ -99,7 +99,7 @@ where { Ok(Some(task)) => { let actual_task: Box<dyn AsyncRunnable> = - serde_json::from_value(task.metadata.clone()).unwrap(); + serde_json::from_value(task.metadata.clone())?; let cron = actual_task.cron(); @@ -143,11 +143,7 @@ pub struct AsyncWorkerTest<'a> { #[cfg(test)] impl<'a> AsyncWorkerTest<'a> { - pub async fn run( - &mut self, - task: &Task, - runnable: &dyn AsyncRunnable, - ) -> Result<(), FangError> { + pub async fn run(&self, task: &Task, runnable: &dyn AsyncRunnable) -> Result<(), FangError> { let result = runnable.run(self.queue).await; match result { @@ -170,7 +166,7 @@ impl<'a> AsyncWorkerTest<'a> { } async fn finalize_task( - &mut self, + &self, task: &Task, result: &Result<(), FangError>, ) -> Result<(), FangError> { @@ -187,15 +183,15 @@ impl<'a> AsyncWorkerTest<'a> { }, RetentionMode::RemoveAll => match result { Ok(_) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } Err(_error) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } }, RetentionMode::RemoveFinished => match result { Ok(_) => { - self.queue.remove_task(task.id).await?; + self.queue.remove_task(&task.id).await?; } Err(error) => { self.queue.fail_task(task, &error.description).await?; @@ -260,7 +256,6 @@ mod async_worker_tests { use crate::RetentionMode; use crate::Scheduled; use async_trait::async_trait; - use bb8_postgres::tokio_postgres::NoTls; use chrono::Duration; use chrono::Utc; use serde::{Deserialize, Serialize}; @@ -273,7 +268,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for WorkerAsyncTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } } @@ -286,7 +281,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for WorkerAsyncTaskSchedule { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } fn cron(&self) -> Option<Scheduled> { @@ -300,7 +295,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for WorkerAsyncTaskScheduled { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> {
log::info!("WorkerAsyncTaskScheduled has been run"); tokio::time::sleep(std::time::Duration::from_millis(2050)).await; Ok(()) @@ -323,7 +318,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncFailedTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { let message = format!("number {} is wrong :(", self.number); Err(FangError { @@ -342,7 +337,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncRetryTask { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { let message = "Failed".to_string(); Err(FangError { @@ -361,7 +356,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTaskType1 { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -376,7 +371,7 @@ mod async_worker_tests { #[typetag::serde] #[async_trait] impl AsyncRunnable for AsyncTaskType2 { - async fn run(&self, _queueable: &mut dyn AsyncQueueable) -> Result<(), FangError> { + async fn run(&self, _queueable: &dyn AsyncQueueable) -> Result<(), FangError> { Ok(()) } @@ -387,27 +382,27 @@ mod async_worker_tests { #[tokio::test] async fn execute_and_finishes_task() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let actual_task = WorkerAsyncTask { number: 1 }; let task = insert_task(&mut test, &actual_task).await; let id = task.id; - let mut worker = AsyncWorkerTest::builder() + let worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) .retention_mode(RetentionMode::KeepAll) .build(); worker.run(&task, &actual_task).await.unwrap(); - let task_finished = test.find_task_by_id(id).await.unwrap(); + let task_finished = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task_finished.id); assert_eq!(FangTaskState::Finished, task_finished.state); } #[tokio::test] async fn schedule_task_test() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let actual_task = WorkerAsyncTaskSchedule { number: 1 }; @@ -422,7 +417,7 @@ mod async_worker_tests { worker.run_tasks_until_none().await.unwrap(); - let task = worker.queue.find_task_by_id(id).await.unwrap(); + let task = worker.queue.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::New, task.state); @@ -431,14 +426,14 @@ mod async_worker_tests { worker.run_tasks_until_none().await.unwrap(); - let task = test.find_task_by_id(id).await.unwrap(); + let task = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Finished, task.state); } #[tokio::test] async fn retries_task_test() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let actual_task = AsyncRetryTask {}; @@ -453,7 +448,7 @@ mod async_worker_tests { worker.run_tasks_until_none().await.unwrap(); - let task = worker.queue.find_task_by_id(id).await.unwrap(); + let task = worker.queue.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Retried, task.state); @@ -462,7 +457,7 @@ mod async_worker_tests {
tokio::time::sleep(core::time::Duration::from_secs(5)).await; worker.run_tasks_until_none().await.unwrap(); - let task = worker.queue.find_task_by_id(id).await.unwrap(); + let task = worker.queue.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Retried, task.state); @@ -471,7 +466,7 @@ mod async_worker_tests { tokio::time::sleep(core::time::Duration::from_secs(10)).await; worker.run_tasks_until_none().await.unwrap(); - let task = test.find_task_by_id(id).await.unwrap(); + let task = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task.id); assert_eq!(FangTaskState::Failed, task.state); assert_eq!("Failed".to_string(), task.error_message.unwrap()); @@ -479,19 +474,19 @@ mod async_worker_tests { #[tokio::test] async fn saves_error_for_failed_task() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let failed_task = AsyncFailedTask { number: 1 }; let task = insert_task(&mut test, &failed_task).await; let id = task.id; - let mut worker = AsyncWorkerTest::builder() + let worker = AsyncWorkerTest::builder() .queue(&mut test as &mut dyn AsyncQueueable) .retention_mode(RetentionMode::KeepAll) .build(); worker.run(&task, &failed_task).await.unwrap(); - let task_finished = test.find_task_by_id(id).await.unwrap(); + let task_finished = test.find_task_by_id(&id).await.unwrap(); assert_eq!(id, task_finished.id); assert_eq!(FangTaskState::Failed, task_finished.state); @@ -503,7 +498,7 @@ mod async_worker_tests { #[tokio::test] async fn executes_task_only_of_specific_type() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let task1 = insert_task(&mut test, &AsyncTaskType1 {}).await; let task12 = insert_task(&mut test, &AsyncTaskType1 {}).await; @@ -520,9 +515,9 @@ mod async_worker_tests { .build(); worker.run_tasks_until_none().await.unwrap(); - let task1 = test.find_task_by_id(id1).await.unwrap(); - let task12 = test.find_task_by_id(id12).await.unwrap(); - let task2 = test.find_task_by_id(id2).await.unwrap(); + let task1 = test.find_task_by_id(&id1).await.unwrap(); + let task12 = test.find_task_by_id(&id12).await.unwrap(); + let task2 = test.find_task_by_id(&id2).await.unwrap(); assert_eq!(id1, task1.id); assert_eq!(id12, task12.id); @@ -534,7 +529,7 @@ mod async_worker_tests { #[tokio::test] async fn remove_when_finished() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let task1 = insert_task(&mut test, &AsyncTaskType1 {}).await; let task12 = insert_task(&mut test, &AsyncTaskType1 {}).await; @@ -564,13 +559,13 @@ mod async_worker_tests { assert_eq!(id2, task2.id); } - async fn insert_task(test: &mut AsyncQueue<NoTls>, task: &dyn AsyncRunnable) -> Task { + async fn insert_task(test: &mut AsyncQueue, task: &dyn AsyncRunnable) -> Task { test.insert_task(task).await.unwrap() } #[tokio::test] async fn no_schedule_until_run() { - let mut test = AsyncQueue::<NoTls>::test().await; + let mut test = AsyncQueue::test_postgres().await; let _task_1 = test .schedule_task(&WorkerAsyncTaskScheduled {})
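The reworked worker tests above all share one shape; written out once, with the hypothetical CountTask from the earlier sketch standing in for a concrete task (this mirrors the crate's own #[cfg(test)] helpers, so it is illustrative rather than public API):

    #[tokio::test]
    async fn count_task_runs_to_completion() {
        let mut queue = AsyncQueue::test_postgres().await;
        let task = queue.insert_task(&CountTask { number: 1 }).await.unwrap();

        // The worker itself no longer needs `mut`; only the queue borrow does.
        let worker = AsyncWorkerTest::builder()
            .queue(&mut queue as &mut dyn AsyncQueueable)
            .retention_mode(RetentionMode::KeepAll)
            .build();

        worker.run(&task, &CountTask { number: 1 }).await.unwrap();

        let finished = queue.find_task_by_id(&task.id).await.unwrap();
        assert_eq!(FangTaskState::Finished, finished.state);
    }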
diff --git a/fang/src/asynk/backend_sqlx.rs b/fang/src/asynk/backend_sqlx.rs new file mode 100644 index 00000000..4093ed13 --- /dev/null +++ b/fang/src/asynk/backend_sqlx.rs @@ -0,0 +1,430 @@ +use chrono::{DateTime, Duration, Utc}; +use sha2::Digest; +use sha2::Sha256; +use sqlx::any::AnyQueryResult; +use sqlx::database::HasArguments; +use sqlx::Database; +use sqlx::Encode; +use sqlx::Executor; +use sqlx::FromRow; +use sqlx::IntoArguments; +use sqlx::Pool; +use sqlx::Type; +use std::fmt::Debug; +use typed_builder::TypedBuilder; +use uuid::Uuid; + +#[cfg(feature = "asynk-postgres")] +mod postgres; +#[cfg(feature = "asynk-postgres")] +use self::postgres::BackendSqlXPg; + +#[cfg(feature = "asynk-sqlite")] +mod sqlite; +#[cfg(feature = "asynk-sqlite")] +use self::sqlite::BackendSqlXSQLite; +#[cfg(feature = "asynk-mysql")] +mod mysql; +#[cfg(feature = "asynk-mysql")] +use self::mysql::BackendSqlXMySQL; + +#[derive(Debug, Clone)] +pub(crate) enum BackendSqlX { + #[cfg(feature = "asynk-postgres")] + Pg, + + #[cfg(feature = "asynk-sqlite")] + Sqlite, + + #[cfg(feature = "asynk-mysql")] + MySql, +} + +#[allow(dead_code)] +#[derive(TypedBuilder, Clone)] +pub(crate) struct QueryParams<'a> { + #[builder(default, setter(strip_option))] + uuid: Option<&'a Uuid>, + #[builder(default, setter(strip_option))] + metadata: Option<&'a serde_json::Value>, + #[builder(default, setter(strip_option))] + task_type: Option<&'a str>, + #[builder(default, setter(strip_option))] + scheduled_at: Option<&'a DateTime<Utc>>, + #[builder(default, setter(strip_option))] + state: Option<FangTaskState>, + #[builder(default, setter(strip_option))] + error_message: Option<&'a str>, + #[builder(default, setter(strip_option))] + runnable: Option<&'a dyn AsyncRunnable>, + #[builder(default, setter(strip_option))] + backoff_seconds: Option<u32>, + #[builder(default, setter(strip_option))] + task: Option<&'a Task>, +} + +#[allow(dead_code)] +pub(crate) enum Res { + Bigint(u64), + Task(Task), +} + +impl Res { + pub(crate) fn unwrap_u64(self) -> u64 { + match self { + Res::Bigint(val) => val, + _ => panic!("Can not unwrap a u64"), + } + } + + pub(crate) fn unwrap_task(self) -> Task { + match self { + Res::Task(task) => task, + _ => panic!("Can not unwrap a task"), + } + } +} + +impl BackendSqlX { + pub(crate) async fn execute_query( + &self, + _query: SqlXQuery, + _pool: &InternalPool, + _params: QueryParams<'_>, + ) -> Result<Res, AsyncQueueError> { + match *self { + #[cfg(feature = "asynk-postgres")] + BackendSqlX::Pg => { + BackendSqlXPg::execute_query(_query, _pool.unwrap_pg_pool(), _params).await + } + #[cfg(feature = "asynk-sqlite")] + BackendSqlX::Sqlite => { + BackendSqlXSQLite::execute_query(_query, _pool.unwrap_sqlite_pool(), _params).await + } + #[cfg(feature = "asynk-mysql")] + BackendSqlX::MySql => { + BackendSqlXMySQL::execute_query(_query, _pool.unwrap_mysql_pool(), _params).await + } + } + } + + // I think it is useful to have this method, although it is not used + pub(crate) fn _name(&self) -> &str { + match *self { + #[cfg(feature = "asynk-postgres")] + BackendSqlX::Pg => BackendSqlXPg::_name(), + #[cfg(feature = "asynk-sqlite")] + BackendSqlX::Sqlite => BackendSqlXSQLite::_name(), + #[cfg(feature = "asynk-mysql")] + BackendSqlX::MySql => BackendSqlXMySQL::_name(), + } + } +} + +#[derive(Debug, Clone)] +pub(crate) enum SqlXQuery { + InsertTask, + UpdateTaskState, + FailTask, + RemoveAllTask, + RemoveAllScheduledTask, + RemoveTask, + RemoveTaskByMetadata, + RemoveTaskType, + FetchTaskType, + FindTaskById, + RetryTask, + InsertTaskIfNotExists, +} + +use crate::AsyncQueueError; +use crate::AsyncRunnable; +use crate::FangTaskState; +use crate::InternalPool; +use crate::Task; + +#[allow(dead_code)] +pub(crate) fn calculate_hash(json: &str) -> String { + let mut hasher = Sha256::new(); + hasher.update(json.as_bytes()); + let result = hasher.finalize(); + hex::encode(result) +} +
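Every queue operation now funnels through BackendSqlX::execute_query. A sketch of the calling convention as the queue layer would use it (backend, pool and task_id are illustrative bindings, not names from this diff):

    // Build only the parameters this query needs; the rest default to None.
    let params = QueryParams::builder()
        .uuid(&task_id)
        .state(FangTaskState::Finished)
        .build();

    // The backend picks the dialect-specific SQL for UpdateTaskState and
    // returns a Res, which the caller unwraps into the variant it expects.
    let task = backend
        .execute_query(SqlXQuery::UpdateTaskState, &pool, params)
        .await?
        .unwrap_task();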
+trait FangQueryable<DB> +where + DB: Database, + for<'r> Task: FromRow<'r, <DB as Database>::Row>, + for<'r> std::string::String: Encode<'r, DB> + Type<DB>, + for<'r> &'r str: Encode<'r, DB> + Type<DB>, + for<'r> i32: Encode<'r, DB> + Type<DB>, + for<'r> DateTime<Utc>: Encode<'r, DB> + Type<DB>, + for<'r> &'r Uuid: Encode<'r, DB> + Type<DB>, + for<'r> &'r serde_json::Value: Encode<'r, DB> + Type<DB>, + for<'r> &'r Pool<DB>: Executor<'r, Database = DB>, + for<'r> <DB as HasArguments<'r>>::Arguments: IntoArguments<'r, DB>, + <DB as Database>::QueryResult: Into<AnyQueryResult>, +{ + async fn fetch_task_type( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + // Unwraps by QueryParams are safe because the responsibility is of the caller + // and the caller is the library itself + let task_type = params.task_type.unwrap(); + + let now = Utc::now(); + + let task: Task = sqlx::query_as(query) + .bind(task_type) + .bind(now) + .fetch_one(pool) + .await?; + + Ok(task) + } + + async fn find_task_by_uniq_hash( + query: &str, + pool: &Pool<DB>, + params: &QueryParams<'_>, + ) -> Option<Task> { + let metadata = params.metadata.unwrap(); + + let uniq_hash = calculate_hash(&metadata.to_string()); + + sqlx::query_as(query) + .bind(uniq_hash) + .fetch_one(pool) + .await + .ok() + } + + async fn find_task_by_id( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let uuid = params.uuid.unwrap(); + + let task: Task = sqlx::query_as(query).bind(uuid).fetch_one(pool).await?; + + Ok(task) + } + + async fn retry_task( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let now = Utc::now(); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + + let task = params.task.unwrap(); + let retries = task.retries + 1; + + let uuid = task.id; + + let error = params.error_message.unwrap(); + + let failed_task: Task = sqlx::query_as(query) + .bind(error) + .bind(retries) + .bind(scheduled_at) + .bind(now) + .bind(&uuid) + .fetch_one(pool) + .await?; + + Ok(failed_task) + } + + async fn insert_task_uniq( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let uuid = Uuid::new_v4(); + + let metadata = params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + let scheduled_at = params.scheduled_at.unwrap(); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let task: Task = sqlx::query_as(query) + .bind(&uuid) + .bind(metadata) + .bind(task_type) + .bind(uniq_hash) + .bind(scheduled_at) + .fetch_one(pool) + .await?; + Ok(task) + } + + async fn insert_task( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let uuid = Uuid::new_v4(); + + let scheduled_at = params.scheduled_at.unwrap(); + + let metadata = params.metadata.unwrap(); + let task_type = params.task_type.unwrap(); + + let task: Task = sqlx::query_as(query) + .bind(&uuid) + .bind(metadata) + .bind(task_type) + .bind(scheduled_at) + .fetch_one(pool) + .await?; + + Ok(task) + } + + async fn update_task_state( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let updated_at = Utc::now(); + + let state_str: &str = params.state.unwrap().into(); + + let uuid = params.uuid.unwrap(); + + let task: Task = sqlx::query_as(query) + .bind(state_str) + .bind(updated_at) + .bind(uuid) + .fetch_one(pool) + .await?; + + Ok(task) + } + + async fn fail_task( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let updated_at = Utc::now(); + + let uuid = params.task.unwrap().id; + + let error_message = params.error_message.unwrap(); + + let failed_task: Task = sqlx::query_as(query) + .bind(<&str>::from(FangTaskState::Failed)) 
.bind(error_message) + .bind(updated_at) + .bind(&uuid) + .fetch_one(pool) + .await?; + + Ok(failed_task) + } + + async fn remove_all_task(query: &str, pool: &Pool<DB>) -> Result<u64, AsyncQueueError> { + // This converts QueryResult to AnyQueryResult and then to u64 + // do not delete into() method and do not delete Into trait bound + Ok(sqlx::query(query) + .execute(pool) + .await? + .into() + .rows_affected()) + } + + async fn remove_all_scheduled_tasks( + query: &str, + pool: &Pool<DB>, + ) -> Result<u64, AsyncQueueError> { + let now = Utc::now(); + + // This converts QueryResult to AnyQueryResult and then to u64 + // do not delete into() method and do not delete Into trait bound + + Ok(sqlx::query(query) + .bind(now) + .execute(pool) + .await? + .into() + .rows_affected()) + } + + async fn remove_task( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<u64, AsyncQueueError> { + let uuid = params.uuid.unwrap(); + + let result = sqlx::query(query) + .bind(uuid) + .execute(pool) + .await? + .into() + .rows_affected(); + + if result != 1 { + Err(AsyncQueueError::ResultError { + expected: 1, + found: result, + }) + } else { + Ok(result) + } + } + + async fn remove_task_by_metadata( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<u64, AsyncQueueError> { + let metadata = serde_json::to_value(params.runnable.unwrap())?; + + let uniq_hash = calculate_hash(&metadata.to_string()); + + Ok(sqlx::query(query) + .bind(uniq_hash) + .execute(pool) + .await? + .into() + .rows_affected()) + } + + async fn remove_task_type( + query: &str, + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<u64, AsyncQueueError> { + let task_type = params.task_type.unwrap(); + + Ok(sqlx::query(query) + .bind(task_type) + .execute(pool) + .await? + .into() + .rows_affected()) + } + + async fn insert_task_if_not_exists( + queries: (&str, &str), + pool: &Pool<DB>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + match Self::find_task_by_uniq_hash(queries.0, pool, &params).await { + Some(task) => Ok(task), + None => Self::insert_task_uniq(queries.1, pool, params).await, + } + } +}
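MySQL is the odd backend out in the files that follow: it has no RETURNING, so BackendSqlXMySQL overrides each write path to execute the statement and then re-read the row by id, while Postgres and SQLite keep the trait defaults above (their impls are empty). The override pattern reduced to its core, as a sketch with illustrative pool and id bindings:

    // Step 1: run the write; MySQL only reports the number of affected rows.
    sqlx::query("UPDATE fang_tasks SET state = ?, updated_at = ? WHERE id = ?")
        .bind("finished")
        .bind(Utc::now())
        .bind(&id)
        .execute(&pool)
        .await?;

    // Step 2: fetch the row back by id, where Postgres would use RETURNING.
    let task: Task = sqlx::query_as("SELECT * FROM fang_tasks WHERE id = ?")
        .bind(&id)
        .fetch_one(&pool)
        .await?;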
diff --git a/fang/src/asynk/backend_sqlx/mysql.rs b/fang/src/asynk/backend_sqlx/mysql.rs new file mode 100644 index 00000000..c0d51409 --- /dev/null +++ b/fang/src/asynk/backend_sqlx/mysql.rs @@ -0,0 +1,454 @@ +const INSERT_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_MYSQL: &str = include_str!("../queries_mysql/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_MYSQL: &str = include_str!("../queries_mysql/update_task_state.sql"); +const FAIL_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL: &str = + include_str!("../queries_mysql/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_MYSQL: &str = + include_str!("../queries_mysql/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_MYSQL: &str = include_str!("../queries_mysql/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_MYSQL: &str = include_str!("../queries_mysql/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL: &str = + include_str!("../queries_mysql/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_MYSQL: &str = include_str!("../queries_mysql/find_task_by_id.sql"); +const RETRY_TASK_QUERY_MYSQL: &str = include_str!("../queries_mysql/retry_task.sql"); + +use chrono::Duration; +use chrono::{DateTime, Utc}; +use sqlx::mysql::MySqlQueryResult; +use sqlx::mysql::MySqlRow; +use sqlx::FromRow; +use sqlx::MySql; +use sqlx::Pool; +use sqlx::Row; +use uuid::Uuid; +use SqlXQuery as Q; + +use super::FangQueryable; +use super::{calculate_hash, QueryParams, Res, SqlXQuery}; +use crate::{AsyncQueueError, FangTaskState, Task}; + +#[derive(Debug, Clone)] +pub(super) struct BackendSqlXMySQL {} + +impl<'a> FromRow<'a, MySqlRow> for Task { + fn from_row(row: &'a MySqlRow) -> Result<Self, sqlx::Error> { + let id: Uuid = row.get("id"); + + let metadata: serde_json::Value = row.get("metadata"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let error_message: Option<String> = row.get("error_message"); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let state: FangTaskState = state_str.into(); + + let task_type: String = row.get("task_type"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let uniq_hash: Option<String> = row.get("uniq_hash"); + + let retries: i32 = row.get("retries"); + + let scheduled_at: DateTime<Utc> = row.get("scheduled_at"); + + let created_at: DateTime<Utc> = row.get("created_at"); + + let updated_at: DateTime<Utc> = row.get("updated_at"); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + .created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + +impl FangQueryable<MySql> for BackendSqlXMySQL { + async fn insert_task( + query: &str, + pool: &Pool<MySql>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let uuid = Uuid::new_v4(); + + let scheduled_at = params.scheduled_at.unwrap(); + + let metadata = params.metadata.unwrap(); + let task_type = params.task_type.unwrap(); + + let affected_rows = Into::<MySqlQueryResult>::into( + sqlx::query(query) + .bind(uuid) + .bind(metadata) + .bind(task_type) + .bind(scheduled_at) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = <BackendSqlXMySQL as FangQueryable<MySql>>::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(task) + } + + async fn update_task_state( + query: &str, + pool: &Pool<MySql>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let updated_at = Utc::now(); + + let state_str: &str = params.state.unwrap().into(); + + let uuid = params.uuid.unwrap(); + + let affected_rows = Into::<MySqlQueryResult>::into( + sqlx::query(query) + .bind(state_str) + .bind(updated_at) + .bind(uuid) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(params.uuid.unwrap()).build(); + + let task: Task = <BackendSqlXMySQL as FangQueryable<MySql>>::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(task) + } + + async fn insert_task_uniq( + query: &str, + pool: &Pool<MySql>, + params: QueryParams<'_>, + ) -> Result<Task, AsyncQueueError> { + let uuid = Uuid::new_v4(); + + let metadata = params.metadata.unwrap(); + + let metadata_str = metadata.to_string(); + + let scheduled_at = params.scheduled_at.unwrap(); + + let task_type = params.task_type.unwrap(); + + let uniq_hash = calculate_hash(&metadata_str); + + let affected_rows = Into::<MySqlQueryResult>::into( + sqlx::query(query) + .bind(uuid) + .bind(metadata) + .bind(task_type)
.bind(uniq_hash) + .bind(scheduled_at) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(task) + } + + async fn fail_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let updated_at = Utc::now(); + + let id = params.task.unwrap().id; + + let error_message = params.error_message.unwrap(); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(<&str>::from(FangTaskState::Failed)) + .bind(error_message) + .bind(updated_at) + .bind(id) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&id).build(); + + let failed_task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(failed_task) + } + + async fn retry_task( + query: &str, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + let now = Utc::now(); + + let scheduled_at = now + Duration::seconds(params.backoff_seconds.unwrap() as i64); + + let retries = params.task.unwrap().retries + 1; + + let uuid = params.task.unwrap().id; + + let error = params.error_message.unwrap(); + + let affected_rows = Into::::into( + sqlx::query(query) + .bind(error) + .bind(retries) + .bind(scheduled_at) + .bind(now) + .bind(uuid) + .execute(pool) + .await?, + ) + .rows_affected(); + + if affected_rows != 1 { + return Err(AsyncQueueError::ResultError { + expected: 1, + found: affected_rows, + }); + } + + let query_params = QueryParams::builder().uuid(&uuid).build(); + + let failed_task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + query_params, + ) + .await?; + + Ok(failed_task) + } + + async fn insert_task_if_not_exists( + queries: (&str, &str), + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match >::find_task_by_uniq_hash( + queries.0, pool, ¶ms, + ) + .await + { + Some(task) => Ok(task), + None => { + >::insert_task_uniq( + queries.1, pool, params, + ) + .await + } + } + } +} + +impl BackendSqlXMySQL { + pub(super) async fn execute_query( + query: SqlXQuery, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match query { + Q::InsertTask => { + let task = >::insert_task( + INSERT_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = >::update_task_state( + UPDATE_TASK_STATE_QUERY_MYSQL, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + + Q::FailTask => { + let task = >::fail_task( + FAIL_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + + Q::RemoveAllTask => { + let affected_rows = >::remove_all_task( + REMOVE_ALL_TASK_QUERY_MYSQL, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + + Q::RemoveAllScheduledTask => { + let affected_rows = + >::remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_MYSQL, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + + Q::RemoveTask => { + let affected_rows = >::remove_task( + REMOVE_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = + >::remove_task_by_metadata( + 
REMOVE_TASK_BY_METADATA_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = >::remove_task_type( + REMOVE_TASKS_TYPE_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::FetchTaskType => { + let task = >::fetch_task_type( + FETCH_TASK_TYPE_QUERY_MYSQL, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskById => { + let task: Task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = >::retry_task( + RETRY_TASK_QUERY_MYSQL, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = >::insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_MYSQL, + INSERT_TASK_UNIQ_QUERY_MYSQL, + ), + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + } + } + + pub(super) fn _name() -> &'static str { + "MySQL" + } +} diff --git a/fang/src/asynk/backend_sqlx/postgres.rs b/fang/src/asynk/backend_sqlx/postgres.rs new file mode 100644 index 00000000..74d56a13 --- /dev/null +++ b/fang/src/asynk/backend_sqlx/postgres.rs @@ -0,0 +1,220 @@ +const INSERT_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/update_task_state.sql"); +const FAIL_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_POSTGRES: &str = + include_str!("../queries_postgres/find_task_by_id.sql"); +const RETRY_TASK_QUERY_POSTGRES: &str = include_str!("../queries_postgres/retry_task.sql"); + +#[derive(Debug, Clone)] +pub(super) struct BackendSqlXPg {} + +use chrono::DateTime; +use chrono::Utc; +use sqlx::postgres::PgRow; +use sqlx::FromRow; +use sqlx::Pool; +use sqlx::Postgres; +use sqlx::Row; +use uuid::Uuid; +use SqlXQuery as Q; + +use super::FangQueryable; +use super::{QueryParams, Res, SqlXQuery}; +use crate::AsyncQueueError; +use crate::FangTaskState; +use crate::Task; + +impl<'a> FromRow<'a, PgRow> for Task { + fn from_row(row: &'a PgRow) -> Result { + let id: Uuid = row.get("id"); + + // -- SELECT metadata->>'type' FROM fang_tasks ; + let metadata: serde_json::Value = row.get("metadata"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let error_message: Option = row.get("error_message"); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let state: FangTaskState = state_str.into(); + + let task_type: String = 
row.get("task_type"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let uniq_hash: Option = row.get("uniq_hash"); + + let retries: i32 = row.get("retries"); + + let scheduled_at: DateTime = row.get("scheduled_at"); + + let created_at: DateTime = row.get("created_at"); + + let updated_at: DateTime = row.get("updated_at"); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + .created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + +impl FangQueryable for BackendSqlXPg {} + +impl BackendSqlXPg { + pub(super) async fn execute_query( + query: SqlXQuery, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match query { + Q::InsertTask => { + let task = >::insert_task( + INSERT_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = >::update_task_state( + UPDATE_TASK_STATE_QUERY_POSTGRES, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FailTask => { + let task = >::fail_task( + FAIL_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::RemoveAllTask => { + let affected_rows = >::remove_all_task( + REMOVE_ALL_TASK_QUERY_POSTGRES, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveAllScheduledTask => { + let affected_rows = + >::remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_POSTGRES, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTask => { + let affected_rows = >::remove_task( + REMOVE_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = + >::remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = >::remove_task_type( + REMOVE_TASKS_TYPE_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::FetchTaskType => { + let task = >::fetch_task_type( + FETCH_TASK_TYPE_QUERY_POSTGRES, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskById => { + let task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_POSTGRES, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = >::retry_task( + RETRY_TASK_QUERY_POSTGRES, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = >::insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_POSTGRES, + INSERT_TASK_UNIQ_QUERY_POSTGRES, + ), + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + } + } + + pub(super) fn _name() -> &'static str { + "PostgreSQL" + } +} diff --git a/fang/src/asynk/backend_sqlx/sqlite.rs b/fang/src/asynk/backend_sqlx/sqlite.rs new file mode 100644 index 00000000..1d000e22 --- /dev/null +++ b/fang/src/asynk/backend_sqlx/sqlite.rs @@ -0,0 +1,213 @@ +const INSERT_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/insert_task.sql"); +const INSERT_TASK_UNIQ_QUERY_SQLITE: &str = include_str!("../queries_sqlite/insert_task_uniq.sql"); +const UPDATE_TASK_STATE_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/update_task_state.sql"); +const FAIL_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/fail_task.sql"); +const REMOVE_ALL_TASK_QUERY_SQLITE: &str = 
include_str!("../queries_sqlite/remove_all_tasks.sql"); +const REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/remove_all_scheduled_tasks.sql"); +const REMOVE_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/remove_task.sql"); +const REMOVE_TASK_BY_METADATA_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/remove_task_by_metadata.sql"); +const REMOVE_TASKS_TYPE_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/remove_tasks_type.sql"); +const FETCH_TASK_TYPE_QUERY_SQLITE: &str = include_str!("../queries_sqlite/fetch_task_type.sql"); +const FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE: &str = + include_str!("../queries_sqlite/find_task_by_uniq_hash.sql"); +const FIND_TASK_BY_ID_QUERY_SQLITE: &str = include_str!("../queries_sqlite/find_task_by_id.sql"); +const RETRY_TASK_QUERY_SQLITE: &str = include_str!("../queries_sqlite/retry_task.sql"); + +#[derive(Debug, Clone)] +pub(super) struct BackendSqlXSQLite {} + +use super::FangQueryable; +use super::{QueryParams, Res, SqlXQuery}; +use crate::AsyncQueueError; +use crate::FangTaskState; +use crate::Task; +use chrono::{DateTime, Utc}; +use sqlx::sqlite::SqliteRow; +use sqlx::FromRow; +use sqlx::Pool; +use sqlx::Row; +use sqlx::Sqlite; +use uuid::Uuid; +use SqlXQuery as Q; + +impl<'a> FromRow<'a, SqliteRow> for Task { + fn from_row(row: &'a SqliteRow) -> Result { + let id: Uuid = row.get("id"); + + let metadata: serde_json::Value = row.get("metadata"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let error_message: Option = row.get("error_message"); + + let state_str: &str = row.get("state"); // will work if database cast json to string + + let state: FangTaskState = state_str.into(); + + let task_type: String = row.get("task_type"); + + // Be careful with this if we update sqlx, https://github.com/launchbadge/sqlx/issues/2416 + let uniq_hash: Option = row.get("uniq_hash"); + + let retries: i32 = row.get("retries"); + + let scheduled_at: DateTime = row.get("scheduled_at"); + + let created_at: DateTime = row.get("created_at"); + + let updated_at: DateTime = row.get("updated_at"); + + Ok(Task::builder() + .id(id) + .metadata(metadata) + .error_message(error_message) + .state(state) + .task_type(task_type) + .uniq_hash(uniq_hash) + .retries(retries) + .scheduled_at(scheduled_at) + .created_at(created_at) + .updated_at(updated_at) + .build()) + } +} + +impl FangQueryable for BackendSqlXSQLite {} + +impl BackendSqlXSQLite { + pub(super) async fn execute_query( + query: SqlXQuery, + pool: &Pool, + params: QueryParams<'_>, + ) -> Result { + match query { + Q::InsertTask => { + let task = >::insert_task( + INSERT_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::UpdateTaskState => { + let task = >::update_task_state( + UPDATE_TASK_STATE_QUERY_SQLITE, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FailTask => { + let task = >::fail_task( + FAIL_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::RemoveAllTask => { + let affected_rows = >::remove_all_task( + REMOVE_ALL_TASK_QUERY_SQLITE, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveAllScheduledTask => { + let affected_rows = + >::remove_all_scheduled_tasks( + REMOVE_ALL_SCHEDULED_TASK_QUERY_SQLITE, + pool, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTask => { + let affected_rows = >::remove_task( + REMOVE_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; + + 
Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskByMetadata => { + let affected_rows = + >::remove_task_by_metadata( + REMOVE_TASK_BY_METADATA_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::RemoveTaskType => { + let affected_rows = >::remove_task_type( + REMOVE_TASKS_TYPE_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Bigint(affected_rows)) + } + Q::FetchTaskType => { + let task = >::fetch_task_type( + FETCH_TASK_TYPE_QUERY_SQLITE, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::FindTaskById => { + let task = >::find_task_by_id( + FIND_TASK_BY_ID_QUERY_SQLITE, + pool, + params, + ) + .await?; + Ok(Res::Task(task)) + } + Q::RetryTask => { + let task = >::retry_task( + RETRY_TASK_QUERY_SQLITE, + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + Q::InsertTaskIfNotExists => { + let task = >::insert_task_if_not_exists( + ( + FIND_TASK_BY_UNIQ_HASH_QUERY_SQLITE, + INSERT_TASK_UNIQ_QUERY_SQLITE, + ), + pool, + params, + ) + .await?; + + Ok(Res::Task(task)) + } + } + } + + pub(super) fn _name() -> &'static str { + "SQLite" + } +} diff --git a/fang/src/asynk/queries/fail_task.sql b/fang/src/asynk/queries/fail_task.sql deleted file mode 100644 index 17192868..00000000 --- a/fang/src/asynk/queries/fail_task.sql +++ /dev/null @@ -1 +0,0 @@ -UPDATE "fang_tasks" SET "state" = $1 , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING * diff --git a/fang/src/asynk/queries/fetch_task_type.sql b/fang/src/asynk/queries/fetch_task_type.sql deleted file mode 100644 index e0558202..00000000 --- a/fang/src/asynk/queries/fetch_task_type.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries/insert_task.sql b/fang/src/asynk/queries/insert_task.sql deleted file mode 100644 index 514d921a..00000000 --- a/fang/src/asynk/queries/insert_task.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO "fang_tasks" ("metadata", "task_type", "scheduled_at") VALUES ($1, $2, $3) RETURNING * diff --git a/fang/src/asynk/queries/insert_task_uniq.sql b/fang/src/asynk/queries/insert_task_uniq.sql deleted file mode 100644 index 08173836..00000000 --- a/fang/src/asynk/queries/insert_task_uniq.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO "fang_tasks" ("metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2 , $3, $4) RETURNING * diff --git a/fang/src/asynk/queries_mysql/fail_task.sql b/fang/src/asynk/queries_mysql/fail_task.sql new file mode 100644 index 00000000..481c27d3 --- /dev/null +++ b/fang/src/asynk/queries_mysql/fail_task.sql @@ -0,0 +1 @@ +UPDATE fang_tasks SET state = ? , error_message = ? , updated_at = ? WHERE id = ? ; diff --git a/fang/src/asynk/queries_mysql/fetch_task_type.sql b/fang/src/asynk/queries_mysql/fetch_task_type.sql new file mode 100644 index 00000000..51929152 --- /dev/null +++ b/fang/src/asynk/queries_mysql/fetch_task_type.sql @@ -0,0 +1 @@ +SELECT * FROM fang_tasks WHERE task_type = ? AND state in ('new', 'retried') AND ? 
>= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_mysql/find_task_by_id.sql b/fang/src/asynk/queries_mysql/find_task_by_id.sql new file mode 100644 index 00000000..234cf1f5 --- /dev/null +++ b/fang/src/asynk/queries_mysql/find_task_by_id.sql @@ -0,0 +1 @@ +SELECT * FROM fang_tasks WHERE id = ?; diff --git a/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql new file mode 100644 index 00000000..9250db9a --- /dev/null +++ b/fang/src/asynk/queries_mysql/find_task_by_uniq_hash.sql @@ -0,0 +1 @@ +SELECT * FROM fang_tasks WHERE uniq_hash = ? AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_mysql/insert_task.sql b/fang/src/asynk/queries_mysql/insert_task.sql new file mode 100644 index 00000000..1045f2f9 --- /dev/null +++ b/fang/src/asynk/queries_mysql/insert_task.sql @@ -0,0 +1 @@ +INSERT INTO fang_tasks (id, metadata, task_type, scheduled_at) VALUES (?, ?, ?, ?); \ No newline at end of file diff --git a/fang/src/asynk/queries_mysql/insert_task_uniq.sql b/fang/src/asynk/queries_mysql/insert_task_uniq.sql new file mode 100644 index 00000000..000a3d7e --- /dev/null +++ b/fang/src/asynk/queries_mysql/insert_task_uniq.sql @@ -0,0 +1 @@ +INSERT INTO fang_tasks(id,metadata,task_type,uniq_hash,scheduled_at) VALUES (?, ? , ?, ?, ?) ; diff --git a/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql new file mode 100644 index 00000000..80e20846 --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_all_scheduled_tasks.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE scheduled_at > ? diff --git a/fang/src/asynk/queries_mysql/remove_all_tasks.sql b/fang/src/asynk/queries_mysql/remove_all_tasks.sql new file mode 100644 index 00000000..4da949ca --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_all_tasks.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks diff --git a/fang/src/asynk/queries_mysql/remove_task.sql b/fang/src/asynk/queries_mysql/remove_task.sql new file mode 100644 index 00000000..2cc4ddc2 --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_task.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE id = ? diff --git a/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql new file mode 100644 index 00000000..f8474e89 --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_task_by_metadata.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE uniq_hash = ? ; diff --git a/fang/src/asynk/queries_mysql/remove_tasks_type.sql b/fang/src/asynk/queries_mysql/remove_tasks_type.sql new file mode 100644 index 00000000..a415d20a --- /dev/null +++ b/fang/src/asynk/queries_mysql/remove_tasks_type.sql @@ -0,0 +1 @@ +DELETE FROM fang_tasks WHERE task_type = ? diff --git a/fang/src/asynk/queries_mysql/retry_task.sql b/fang/src/asynk/queries_mysql/retry_task.sql new file mode 100644 index 00000000..b0481720 --- /dev/null +++ b/fang/src/asynk/queries_mysql/retry_task.sql @@ -0,0 +1 @@ +UPDATE fang_tasks SET state = 'retried' , error_message = ?, retries = ?, scheduled_at = ?, updated_at = ? WHERE id = ?; \ No newline at end of file diff --git a/fang/src/asynk/queries_mysql/update_task_state.sql b/fang/src/asynk/queries_mysql/update_task_state.sql new file mode 100644 index 00000000..237da09f --- /dev/null +++ b/fang/src/asynk/queries_mysql/update_task_state.sql @@ -0,0 +1 @@ +UPDATE fang_tasks SET state = ? , updated_at = ? 
WHERE id = ?; \ No newline at end of file diff --git a/fang/src/asynk/queries_postgres/fail_task.sql b/fang/src/asynk/queries_postgres/fail_task.sql new file mode 100644 index 00000000..f8a9bdc7 --- /dev/null +++ b/fang/src/asynk/queries_postgres/fail_task.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at \ No newline at end of file diff --git a/fang/src/asynk/queries_postgres/fetch_task_type.sql b/fang/src/asynk/queries_postgres/fetch_task_type.sql new file mode 100644 index 00000000..14f4af22 --- /dev/null +++ b/fang/src/asynk/queries_postgres/fetch_task_type.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 FOR UPDATE SKIP LOCKED diff --git a/fang/src/asynk/queries_postgres/find_task_by_id.sql b/fang/src/asynk/queries_postgres/find_task_by_id.sql new file mode 100644 index 00000000..88c99be4 --- /dev/null +++ b/fang/src/asynk/queries_postgres/find_task_by_id.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE id = $1::uuid diff --git a/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql new file mode 100644 index 00000000..3c937a54 --- /dev/null +++ b/fang/src/asynk/queries_postgres/find_task_by_uniq_hash.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE uniq_hash = $1 AND state in ('new', 'retried') LIMIT 1 diff --git a/fang/src/asynk/queries_postgres/insert_task.sql b/fang/src/asynk/queries_postgres/insert_task.sql new file mode 100644 index 00000000..f719d04c --- /dev/null +++ b/fang/src/asynk/queries_postgres/insert_task.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", "scheduled_at") VALUES ($1, $2::jsonb, $3, $4 ) RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/insert_task_uniq.sql b/fang/src/asynk/queries_postgres/insert_task_uniq.sql new file mode 100644 index 00000000..15a78ab2 --- /dev/null +++ b/fang/src/asynk/queries_postgres/insert_task_uniq.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2::jsonb , $3, $4, $5 ) RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql similarity index 100% rename from fang/src/asynk/queries/remove_all_scheduled_tasks.sql rename to fang/src/asynk/queries_postgres/remove_all_scheduled_tasks.sql diff --git a/fang/src/asynk/queries/remove_all_tasks.sql b/fang/src/asynk/queries_postgres/remove_all_tasks.sql similarity index 100% rename from fang/src/asynk/queries/remove_all_tasks.sql rename to fang/src/asynk/queries_postgres/remove_all_tasks.sql diff --git 
a/fang/src/asynk/queries_postgres/remove_task.sql b/fang/src/asynk/queries_postgres/remove_task.sql new file mode 100644 index 00000000..e6a2261c --- /dev/null +++ b/fang/src/asynk/queries_postgres/remove_task.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE id = $1::uuid diff --git a/fang/src/asynk/queries/remove_task_by_metadata.sql b/fang/src/asynk/queries_postgres/remove_task_by_metadata.sql similarity index 100% rename from fang/src/asynk/queries/remove_task_by_metadata.sql rename to fang/src/asynk/queries_postgres/remove_task_by_metadata.sql diff --git a/fang/src/asynk/queries/remove_tasks_type.sql b/fang/src/asynk/queries_postgres/remove_tasks_type.sql similarity index 100% rename from fang/src/asynk/queries/remove_tasks_type.sql rename to fang/src/asynk/queries_postgres/remove_tasks_type.sql diff --git a/fang/src/asynk/queries_postgres/retry_task.sql b/fang/src/asynk/queries_postgres/retry_task.sql new file mode 100644 index 00000000..e559422b --- /dev/null +++ b/fang/src/asynk/queries_postgres/retry_task.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = 'retried' , "error_message" = $1, "retries" = $2, scheduled_at = $3, "updated_at" = $4 WHERE id = $5::uuid RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_postgres/update_task_state.sql b/fang/src/asynk/queries_postgres/update_task_state.sql new file mode 100644 index 00000000..8620e755 --- /dev/null +++ b/fang/src/asynk/queries_postgres/update_task_state.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = $1::fang_task_state , "updated_at" = $2 WHERE id = $3::uuid RETURNING id , metadata , error_message, state::text , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_sqlite/fail_task.sql b/fang/src/asynk/queries_sqlite/fail_task.sql new file mode 100644 index 00000000..f8ae5f5b --- /dev/null +++ b/fang/src/asynk/queries_sqlite/fail_task.sql @@ -0,0 +1 @@ +UPDATE "fang_tasks" SET "state" = $1 , "error_message" = $2 , "updated_at" = $3 WHERE id = $4 RETURNING id , metadata , error_message, state , task_type , uniq_hash, retries , scheduled_at , created_at , updated_at diff --git a/fang/src/asynk/queries_sqlite/fetch_task_type.sql b/fang/src/asynk/queries_sqlite/fetch_task_type.sql new file mode 100644 index 00000000..02c3f9f4 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/fetch_task_type.sql @@ -0,0 +1 @@ +SELECT id , metadata , error_message, state, task_type , uniq_hash, retries , scheduled_at , created_at , updated_at FROM fang_tasks WHERE task_type = $1 AND state in ('new', 'retried') AND $2 >= scheduled_at ORDER BY created_at ASC, scheduled_at ASC LIMIT 1 diff --git a/fang/src/asynk/queries/find_task_by_id.sql b/fang/src/asynk/queries_sqlite/find_task_by_id.sql similarity index 100% rename from fang/src/asynk/queries/find_task_by_id.sql rename to fang/src/asynk/queries_sqlite/find_task_by_id.sql diff --git a/fang/src/asynk/queries/find_task_by_uniq_hash.sql b/fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql similarity index 100% rename from fang/src/asynk/queries/find_task_by_uniq_hash.sql rename to fang/src/asynk/queries_sqlite/find_task_by_uniq_hash.sql diff --git a/fang/src/asynk/queries_sqlite/insert_task.sql b/fang/src/asynk/queries_sqlite/insert_task.sql new file mode 100644 index 00000000..00a03515 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/insert_task.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ("id", "metadata", "task_type", 
"scheduled_at") VALUES ($1, $2, $3, $4) RETURNING * diff --git a/fang/src/asynk/queries_sqlite/insert_task_uniq.sql b/fang/src/asynk/queries_sqlite/insert_task_uniq.sql new file mode 100644 index 00000000..f21dc2a5 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/insert_task_uniq.sql @@ -0,0 +1 @@ +INSERT INTO "fang_tasks" ( "id" , "metadata", "task_type" , "uniq_hash", "scheduled_at") VALUES ($1, $2 , $3, $4, $5 ) RETURNING * diff --git a/fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql b/fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql new file mode 100644 index 00000000..61a5b6b5 --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_all_scheduled_tasks.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE scheduled_at > $1 diff --git a/fang/src/asynk/queries_sqlite/remove_all_tasks.sql b/fang/src/asynk/queries_sqlite/remove_all_tasks.sql new file mode 100644 index 00000000..eaecbbaf --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_all_tasks.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" diff --git a/fang/src/asynk/queries/remove_task.sql b/fang/src/asynk/queries_sqlite/remove_task.sql similarity index 100% rename from fang/src/asynk/queries/remove_task.sql rename to fang/src/asynk/queries_sqlite/remove_task.sql diff --git a/fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql b/fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql new file mode 100644 index 00000000..94324e2a --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_task_by_metadata.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE uniq_hash = $1 diff --git a/fang/src/asynk/queries_sqlite/remove_tasks_type.sql b/fang/src/asynk/queries_sqlite/remove_tasks_type.sql new file mode 100644 index 00000000..e4de9c0f --- /dev/null +++ b/fang/src/asynk/queries_sqlite/remove_tasks_type.sql @@ -0,0 +1 @@ +DELETE FROM "fang_tasks" WHERE task_type = $1 diff --git a/fang/src/asynk/queries/retry_task.sql b/fang/src/asynk/queries_sqlite/retry_task.sql similarity index 100% rename from fang/src/asynk/queries/retry_task.sql rename to fang/src/asynk/queries_sqlite/retry_task.sql diff --git a/fang/src/asynk/queries/update_task_state.sql b/fang/src/asynk/queries_sqlite/update_task_state.sql similarity index 77% rename from fang/src/asynk/queries/update_task_state.sql rename to fang/src/asynk/queries_sqlite/update_task_state.sql index e2e2d94d..a796e7db 100644 --- a/fang/src/asynk/queries/update_task_state.sql +++ b/fang/src/asynk/queries_sqlite/update_task_state.sql @@ -1 +1 @@ -UPDATE "fang_tasks" SET "state" = $1 , "updated_at" = $2 WHERE id = $3 RETURNING * +UPDATE "fang_tasks" SET "state" = $1 , "updated_at" = $2 WHERE id = $3 RETURNING * \ No newline at end of file diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs index 4b98594f..94273909 100644 --- a/fang/src/blocking/mysql_schema.rs +++ b/fang/src/blocking/mysql_schema.rs @@ -11,8 +11,8 @@ diesel::table! { use super::sql_types::FangTasksStateEnum; fang_tasks (id) { - #[max_length = 36] - id -> Varchar, + #[max_length = 16] + id -> Binary, metadata -> Json, error_message -> Nullable, #[max_length = 11] @@ -20,10 +20,10 @@ diesel::table! 
diff --git a/fang/src/blocking/mysql_schema.rs b/fang/src/blocking/mysql_schema.rs
index 4b98594f..94273909 100644
--- a/fang/src/blocking/mysql_schema.rs
+++ b/fang/src/blocking/mysql_schema.rs
@@ -11,8 +11,8 @@ diesel::table! {
     use super::sql_types::FangTasksStateEnum;
 
     fang_tasks (id) {
-        #[max_length = 36]
-        id -> Varchar,
+        #[max_length = 16]
+        id -> Binary,
         metadata -> Json,
         error_message -> Nullable<Text>,
         #[max_length = 11]
@@ -20,10 +20,10 @@ diesel::table! {
         #[max_length = 255]
         task_type -> Varchar,
         #[max_length = 64]
-        uniq_hash -> Nullable,
+        uniq_hash -> Nullable,
         retries -> Integer,
-        scheduled_at -> Timestamp,
-        created_at -> Timestamp,
-        updated_at -> Timestamp,
+        scheduled_at -> Datetime,
+        created_at -> Datetime,
+        updated_at -> Datetime,
     }
 }
diff --git a/fang/src/blocking/postgres_schema.rs b/fang/src/blocking/postgres_schema.rs
index 15b051c7..89234568 100644
--- a/fang/src/blocking/postgres_schema.rs
+++ b/fang/src/blocking/postgres_schema.rs
@@ -15,9 +15,8 @@ diesel::table! {
         metadata -> Jsonb,
         error_message -> Nullable<Text>,
         state -> FangTaskState,
-        task_type -> Varchar,
-        #[max_length = 64]
-        uniq_hash -> Nullable<Varchar>,
+        task_type -> Text,
+        uniq_hash -> Nullable<Text>,
         retries -> Int4,
         scheduled_at -> Timestamptz,
         created_at -> Timestamptz,
diff --git a/fang/src/blocking/queue.rs b/fang/src/blocking/queue.rs
index 3716833f..a53542c4 100644
--- a/fang/src/blocking/queue.rs
+++ b/fang/src/blocking/queue.rs
@@ -85,14 +85,14 @@ pub trait Queueable {
     fn remove_tasks_of_type(&self, task_type: &str) -> Result<usize, QueueError>;
 
     /// Remove a task by its id.
-    fn remove_task(&self, id: Uuid) -> Result<usize, QueueError>;
+    fn remove_task(&self, id: &Uuid) -> Result<usize, QueueError>;
 
     /// To use this function task has to be uniq. uniq() has to return true.
     /// If task is not uniq this function will not do anything.
     /// Remove a task by its metadata (struct fields values)
     fn remove_task_by_metadata(&self, task: &dyn Runnable) -> Result<usize, QueueError>;
 
-    fn find_task_by_id(&self, id: Uuid) -> Option<Task>;
+    fn find_task_by_id(&self, id: &Uuid) -> Option<Task>;
 
     /// Update the state field of the specified task
     /// See the `FangTaskState` enum for possible states.
@@ -175,7 +175,7 @@ impl Queueable for Queue {
         Self::remove_tasks_of_type_query(&mut connection, task_type)
     }
 
-    fn remove_task(&self, id: Uuid) -> Result<usize, QueueError> {
+    fn remove_task(&self, id: &Uuid) -> Result<usize, QueueError> {
         let mut connection = self.get_connection()?;
 
         Self::remove_task_query(&mut connection, id)
@@ -205,7 +205,7 @@ impl Queueable for Queue {
         Self::fail_task_query(&mut connection, task, error)
     }
 
-    fn find_task_by_id(&self, id: Uuid) -> Option<Task> {
+    fn find_task_by_id(&self, id: &Uuid) -> Option<Task> {
         let mut connection = self.get_connection().unwrap();
 
         Self::find_task_by_id_query(&mut connection, id)
@@ -344,7 +344,7 @@ impl Queue {
         })
     }
 
-    pub fn find_task_by_id_query(connection: &mut PgConnection, id: Uuid) -> Option<Task> {
+    pub fn find_task_by_id_query(connection: &mut PgConnection, id: &Uuid) -> Option<Task> {
         fang_tasks::table
             .filter(fang_tasks::id.eq(id))
             .first::<Task>(connection)
@@ -385,7 +385,10 @@ impl Queue {
         Ok(diesel::delete(query).execute(connection)?)
     }
 
-    pub fn remove_task_query(connection: &mut PgConnection, id: Uuid) -> Result<usize, QueueError> {
+    pub fn remove_task_query(
+        connection: &mut PgConnection,
+        id: &Uuid,
+    ) -> Result<usize, QueueError> {
         let query = fang_tasks::table.filter(fang_tasks::id.eq(id));
 
         Ok(diesel::delete(query).execute(connection)?)
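The `Queueable` change above (take `&Uuid` instead of `Uuid`) is mechanical but touches every caller, as the test churn below shows; ids are now borrowed, which matches how diesel's `.eq()` binds them. A small usage sketch against the new signatures (the `cleanup` helper is hypothetical, and `Queueable`/`QueueError` are assumed to be re-exported at the crate root):

```rust
use fang::{QueueError, Queueable};
use uuid::Uuid;

// Uuid is Copy, so this is less about cost than about a consistent
// borrow-based API: callers keep their ids, the queue only looks at them.
fn cleanup(queue: &impl Queueable, id: &Uuid) -> Result<(), QueueError> {
    if queue.find_task_by_id(id).is_some() {
        queue.remove_task(id)?; // returns the number of deleted rows
    }
    Ok(())
}
```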
diff --git a/fang/src/blocking/queue/queue_tests.rs b/fang/src/blocking/queue/queue_tests.rs
index 7529af23..8bf8c1ee 100644
--- a/fang/src/blocking/queue/queue_tests.rs
+++ b/fang/src/blocking/queue/queue_tests.rs
@@ -269,8 +269,8 @@ macro_rules! test_queue {
                 let result = queue.remove_all_tasks().unwrap();
 
                 assert_eq!(2, result);
-                assert_eq!(None, queue.find_task_by_id(task1.id));
-                assert_eq!(None, queue.find_task_by_id(task2.id));
+                assert_eq!(None, queue.find_task_by_id(&task1.id));
+                assert_eq!(None, queue.find_task_by_id(&task2.id));
             }
 
             #[test]
@@ -283,13 +283,13 @@ macro_rules! test_queue {
                 let task1 = queue.insert_task(&task1).unwrap();
                 let task2 = queue.insert_task(&task2).unwrap();
 
-                assert!(queue.find_task_by_id(task1.id).is_some());
-                assert!(queue.find_task_by_id(task2.id).is_some());
+                assert!(queue.find_task_by_id(&task1.id).is_some());
+                assert!(queue.find_task_by_id(&task2.id).is_some());
 
-                queue.remove_task(task1.id).unwrap();
+                queue.remove_task(&task1.id).unwrap();
 
-                assert!(queue.find_task_by_id(task1.id).is_none());
-                assert!(queue.find_task_by_id(task2.id).is_some());
+                assert!(queue.find_task_by_id(&task1.id).is_none());
+                assert!(queue.find_task_by_id(&task2.id).is_some());
             }
 
             #[test]
@@ -302,13 +302,13 @@ macro_rules! test_queue {
                 let task1 = queue.insert_task(&task1).unwrap();
                 let task2 = queue.insert_task(&task2).unwrap();
 
-                assert!(queue.find_task_by_id(task1.id).is_some());
-                assert!(queue.find_task_by_id(task2.id).is_some());
+                assert!(queue.find_task_by_id(&task1.id).is_some());
+                assert!(queue.find_task_by_id(&task2.id).is_some());
 
                 queue.remove_tasks_of_type("weirdo").unwrap();
 
-                assert!(queue.find_task_by_id(task1.id).is_some());
-                assert!(queue.find_task_by_id(task2.id).is_none());
+                assert!(queue.find_task_by_id(&task1.id).is_some());
+                assert!(queue.find_task_by_id(&task2.id).is_none());
             }
 
             #[test]
@@ -323,15 +323,15 @@ macro_rules! test_queue {
                 let task2 = queue.insert_task(&m_task2).unwrap();
                 let task3 = queue.insert_task(&m_task3).unwrap();
 
-                assert!(queue.find_task_by_id(task1.id).is_some());
-                assert!(queue.find_task_by_id(task2.id).is_some());
-                assert!(queue.find_task_by_id(task3.id).is_some());
+                assert!(queue.find_task_by_id(&task1.id).is_some());
+                assert!(queue.find_task_by_id(&task2.id).is_some());
+                assert!(queue.find_task_by_id(&task3.id).is_some());
 
                 queue.remove_task_by_metadata(&m_task1).unwrap();
 
-                assert!(queue.find_task_by_id(task1.id).is_none());
-                assert!(queue.find_task_by_id(task2.id).is_some());
-                assert!(queue.find_task_by_id(task3.id).is_some());
+                assert!(queue.find_task_by_id(&task1.id).is_none());
+                assert!(queue.find_task_by_id(&task2.id).is_some());
+                assert!(queue.find_task_by_id(&task3.id).is_some());
             }
         }
    };
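These assertions all live inside `macro_rules! test_queue`, which stamps the same suite out once per backend so that the Postgres, MySQL, and SQLite queues are exercised by identical tests. Schematically (a simplified sketch, not the macro's real body):

```rust
// Each invocation generates a test module bound to one backend's queue.
macro_rules! test_queue {
    ($mod_name:ident, $make_queue:expr) => {
        mod $mod_name {
            use super::*;

            #[test]
            fn remove_task_test() {
                let queue = $make_queue;
                // ... the shared find_task_by_id / remove_task assertions ...
            }
        }
    };
}
```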
@@ -2,15 +2,15 @@ diesel::table! {
     fang_tasks (id) {
-        id -> Text,
+        id -> Binary,
         metadata -> Text,
         error_message -> Nullable<Text>,
         state -> Text,
         task_type -> Text,
         uniq_hash -> Nullable<Text>,
         retries -> Integer,
-        scheduled_at -> Timestamp,
-        created_at -> Timestamp,
-        updated_at -> Timestamp,
+        scheduled_at -> Integer,
+        created_at -> Integer,
+        updated_at -> Integer,
     }
 }
diff --git a/fang/src/blocking/worker.rs b/fang/src/blocking/worker.rs
index afa2269d..f2dc33a0 100644
--- a/fang/src/blocking/worker.rs
+++ b/fang/src/blocking/worker.rs
@@ -136,12 +136,12 @@ where
             }
 
             RetentionMode::RemoveAll => {
-                self.queue.remove_task(task.id)?;
+                self.queue.remove_task(&task.id)?;
             }
 
             RetentionMode::RemoveFinished => match result {
                 Ok(_) => {
-                    self.queue.remove_task(task.id)?;
+                    self.queue.remove_task(&task.id)?;
                 }
                 Err(error) => {
                     self.queue.fail_task(task, &error.description)?;
@@ -305,7 +305,7 @@ mod worker_tests {
         // this operation commits and thats why need to commit this test
         worker.run(&task).unwrap();
 
-        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap();
+        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap();
 
         assert_eq!(FangTaskState::Finished, found_task.state);
 
@@ -340,10 +340,10 @@ mod worker_tests {
         std::thread::sleep(std::time::Duration::from_millis(1000));
 
-        let found_task1 = Queue::find_task_by_id_query(&mut pooled_connection, task1.id).unwrap();
+        let found_task1 = Queue::find_task_by_id_query(&mut pooled_connection, &task1.id).unwrap();
         assert_eq!(FangTaskState::Finished, found_task1.state);
 
-        let found_task2 = Queue::find_task_by_id_query(&mut pooled_connection, task2.id).unwrap();
+        let found_task2 = Queue::find_task_by_id_query(&mut pooled_connection, &task2.id).unwrap();
         assert_eq!(FangTaskState::New, found_task2.state);
 
         Queue::remove_tasks_of_type_query(&mut pooled_connection, "type1").unwrap();
@@ -373,7 +373,7 @@ mod worker_tests {
 
         worker.run(&task).unwrap();
 
-        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap();
+        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap();
 
         assert_eq!(FangTaskState::Failed, found_task.state);
         assert_eq!(
@@ -409,7 +409,7 @@ mod worker_tests {
 
         std::thread::sleep(std::time::Duration::from_millis(1000));
 
-        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap();
+        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap();
 
         assert_eq!(FangTaskState::Retried, found_task.state);
         assert_eq!(1, found_task.retries);
@@ -420,7 +420,7 @@ mod worker_tests {
 
         worker.run_tasks_until_none().unwrap();
 
-        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, task.id).unwrap();
+        let found_task = Queue::find_task_by_id_query(&mut pooled_connection, &task.id).unwrap();
 
         assert_eq!(FangTaskState::Failed, found_task.state);
         assert_eq!(2, found_task.retries);
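For context on the `worker.rs` hunk: once a task has run, the worker consults its `RetentionMode` to decide what happens to the row. Condensed from the logic visible in the diff (not a verbatim excerpt; the `KeepAll` arm is outside the hunk and its body is assumed here):

```rust
match self.retention_mode {
    // Keep every row; only the task's state column is updated.
    RetentionMode::KeepAll => {}
    // Delete the row regardless of how the task ended.
    RetentionMode::RemoveAll => {
        self.queue.remove_task(&task.id)?;
    }
    // Delete successes only; failures stay queryable with their error.
    RetentionMode::RemoveFinished => match result {
        Ok(_) => {
            self.queue.remove_task(&task.id)?;
        }
        Err(error) => {
            self.queue.fail_task(task, &error.description)?;
        }
    },
}
```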
diff --git a/fang/src/lib.rs b/fang/src/lib.rs
index e6abb131..7c1d0096 100644
--- a/fang/src/lib.rs
+++ b/fang/src/lib.rs
@@ -6,11 +6,8 @@ use std::time::Duration;
 use thiserror::Error;
 use typed_builder::TypedBuilder;
 use uuid::Uuid;
-
-#[cfg(feature = "asynk")]
-use postgres_types::{FromSql, ToSql};
-/// Represents a schedule for scheduled tasks.
 ///
+/// Represents a schedule for scheduled tasks.
 /// It's used in the [`AsyncRunnable::cron`] and [`Runnable::cron`]
 #[derive(Debug, Clone)]
 pub enum Scheduled {
@@ -104,37 +101,55 @@ pub struct FangError {
 /// Possible states of the task
 #[derive(Debug, Eq, PartialEq, Clone)]
 #[cfg_attr(feature = "blocking", derive(diesel_derive_enum::DbEnum))]
-#[cfg_attr(feature = "asynk", derive(ToSql, FromSql, Default))]
-#[cfg_attr(feature = "asynk", postgres(name = "fang_task_state"))]
 #[cfg_attr(
     feature = "blocking",
     ExistingTypePath = "crate::postgres_schema::sql_types::FangTaskState"
 )]
 pub enum FangTaskState {
     /// The task is ready to be executed
-    #[cfg_attr(feature = "asynk", postgres(name = "new"))]
-    #[cfg_attr(feature = "asynk", default)]
     New,
     /// The task is being executed.
     ///
     /// The task may stay in this state forever
     /// if an unexpected error happened
-    #[cfg_attr(feature = "asynk", postgres(name = "in_progress"))]
     InProgress,
     /// The task failed
-    #[cfg_attr(feature = "asynk", postgres(name = "failed"))]
     Failed,
     /// The task finished successfully
-    #[cfg_attr(feature = "asynk", postgres(name = "finished"))]
     Finished,
     /// The task is being retried. It means it failed but it's scheduled to be executed again
-    #[cfg_attr(feature = "asynk", postgres(name = "retried"))]
     Retried,
 }
 
+impl<S: AsRef<str>> From<S> for FangTaskState {
+    fn from(str: S) -> Self {
+        let str = str.as_ref();
+        match str {
+            "new" => FangTaskState::New,
+            "in_progress" => FangTaskState::InProgress,
+            "failed" => FangTaskState::Failed,
+            "finished" => FangTaskState::Finished,
+            "retried" => FangTaskState::Retried,
+            _ => unreachable!(),
+        }
+    }
+}
+
+impl From<FangTaskState> for &str {
+    fn from(state: FangTaskState) -> Self {
+        match state {
+            FangTaskState::New => "new",
+            FangTaskState::InProgress => "in_progress",
+            FangTaskState::Failed => "failed",
+            FangTaskState::Finished => "finished",
+            FangTaskState::Retried => "retried",
+        }
+    }
+}
+
 #[derive(Debug, Eq, PartialEq, Clone, TypedBuilder)]
 #[cfg_attr(feature = "blocking", derive(Queryable, Identifiable))]
-#[cfg_attr(feature = "blocking", diesel(table_name = fang_tasks))]
+#[diesel(table_name = fang_tasks)]
 pub struct Task {
     #[builder(setter(into))]
     pub id: Uuid,
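With the `postgres_types` derives gone, the async backend now moves `FangTaskState` through plain strings, which is what the `state::text` casts in the Postgres queries and the two new `From` impls above exist for. Round-tripping looks like this (assuming `FangTaskState` stays exported at the crate root):

```rust
use fang::FangTaskState;

fn main() {
    // DB -> enum: parse the text produced by the state::text casts.
    let state = FangTaskState::from("in_progress");
    assert_eq!(FangTaskState::InProgress, state);

    // enum -> DB: the &str side feeds binds like $1::fang_task_state.
    let text: &str = state.into();
    assert_eq!("in_progress", text);
}
```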
@@ -195,10 +210,6 @@ pub mod asynk;
 #[cfg(feature = "asynk")]
 pub use asynk::*;
 
-#[cfg(feature = "asynk")]
-#[doc(hidden)]
-pub use bb8_postgres::tokio_postgres::tls::NoTls;
-
 #[cfg(feature = "asynk")]
 #[doc(hidden)]
 pub use async_trait::async_trait;
@@ -212,14 +223,14 @@ use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
 #[cfg(feature = "migrations")]
 use std::error::Error as SomeError;
 
-#[cfg(feature = "migrations_postgres")]
+#[cfg(feature = "migrations-postgres")]
 use diesel::pg::Pg;
 
-#[cfg(feature = "migrations_postgres")]
+#[cfg(feature = "migrations-postgres")]
 pub const MIGRATIONS_POSTGRES: EmbeddedMigrations =
     embed_migrations!("postgres_migrations/migrations");
 
-#[cfg(feature = "migrations_postgres")]
+#[cfg(feature = "migrations-postgres")]
 pub fn run_migrations_postgres(
     connection: &mut impl MigrationHarness<Pg>,
 ) -> Result<(), Box<dyn SomeError + Send + Sync>> {
@@ -228,13 +239,13 @@ pub fn run_migrations_postgres(
     Ok(())
 }
 
-#[cfg(feature = "migrations_mysql")]
+#[cfg(feature = "migrations-mysql")]
 use diesel::mysql::Mysql;
 
-#[cfg(feature = "migrations_mysql")]
+#[cfg(feature = "migrations-mysql")]
 pub const MIGRATIONS_MYSQL: EmbeddedMigrations = embed_migrations!("mysql_migrations/migrations");
 
-#[cfg(feature = "migrations_mysql")]
+#[cfg(feature = "migrations-mysql")]
 pub fn run_migrations_mysql(
     connection: &mut impl MigrationHarness<Mysql>,
 ) -> Result<(), Box<dyn SomeError + Send + Sync>> {
@@ -243,13 +254,13 @@ pub fn run_migrations_mysql(
     Ok(())
 }
 
-#[cfg(feature = "migrations_sqlite")]
+#[cfg(feature = "migrations-sqlite")]
 use diesel::sqlite::Sqlite;
 
-#[cfg(feature = "migrations_sqlite")]
+#[cfg(feature = "migrations-sqlite")]
 pub const MIGRATIONS_SQLITE: EmbeddedMigrations =
     embed_migrations!("sqlite_migrations/migrations");
 
-#[cfg(feature = "migrations_sqlite")]
+#[cfg(feature = "migrations-sqlite")]
 pub fn run_migrations_sqlite(
     connection: &mut impl MigrationHarness<Sqlite>,
 ) -> Result<(), Box<dyn SomeError + Send + Sync>> {
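One consequence of the feature renames above: downstream `Cargo.toml`s must now enable `migrations-postgres` / `migrations-mysql` / `migrations-sqlite` (hyphenated) rather than the old underscore names. The call side is unchanged; for example (a sketch, assuming the `migrations-postgres` feature and a reachable database; a diesel `PgConnection` serves as the `MigrationHarness`):

```rust
use diesel::{Connection, PgConnection};
use fang::run_migrations_postgres;

fn main() {
    // Hypothetical connection string for a local dev database.
    let mut conn = PgConnection::establish("postgres://postgres:postgres@localhost/fang")
        .expect("failed to connect to Postgres");
    run_migrations_postgres(&mut conn).expect("failed to run fang migrations");
}
```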