feat(jobs): add /api/job_events endpoint and consolidate scheduled jobs

This commit is contained in:
2025-12-31 01:15:05 +01:00
parent 0049fa8167
commit fc151a68fe
3 changed files with 74 additions and 16 deletions

View File

@@ -0,0 +1,44 @@
{
"db_name": "SQLite",
"query": "SELECT * FROM job_events ORDER BY created_at DESC LIMIT 1000",
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Integer"
},
{
"name": "job_name",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "event_type",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "event_data",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 4,
"type_info": "Text"
}
],
"parameters": {
"Right": 0
},
"nullable": [
false,
false,
false,
true,
true
]
},
"hash": "2f0d0c29c0a1c4892c3a01b47a2aa586d25d4644a907d748cd3ade0401fc77be"
}

View File

@@ -1,7 +1,9 @@
use std::{error::Error, option, time::Duration};
use actix_web::{Responder, get};
use quartz::{Job, Scheduler, Trigger};
use sqlx::query;
use serde::{Deserialize, Serialize};
use sqlx::{query, query_as};
use tokio::runtime::Handle;
async fn log_job_event(job_name: &str, event: &str, event_data: Option<&str>) {
@@ -34,32 +36,20 @@ use crate::{auth, db};
pub fn init_scheduler(rt: Handle) -> Scheduler {
let scheduler = Scheduler::new();
let trigger_every_5min = Trigger::with_identity("5min_trigger", "default_group")
.every(Duration::from_mins(5))
.repeat(u32::max_value());
let trigger_every_3min = Trigger::with_identity("3min_trigger", "default_group")
let trigger_every_3min = Trigger::with_identity("3min_trigger", "3min_group")
.every(Duration::from_mins(3))
.repeat(u32::max_value());
let rt_cleanup = rt.clone();
let cleanup_tokens_job = Job::with_identity("cleanup_tokens_job", "default_group", move || {
let rt = rt_cleanup.clone();
rt.spawn(async move {
let _ = cleanup_old_tokens().await;
});
});
let rt_refresh = rt.clone();
let refresh_access_token_job =
Job::with_identity("refresh_access_token_job", "default_group", move || {
Job::with_identity("refresh_access_token_job", "refresh_group", move || {
let rt = rt_refresh.clone();
rt.spawn(async move {
let _ = refresh_access_token().await;
let _ = cleanup_old_tokens().await;
});
});
scheduler.schedule_job(cleanup_tokens_job, trigger_every_5min);
scheduler.schedule_job(refresh_access_token_job, trigger_every_3min);
scheduler
@@ -95,3 +85,26 @@ pub async fn refresh_access_token() -> Result<(), anyhow::Error> {
log_job_event("REFRESH_ACCESS_TOKEN", "FINISHED", None).await;
Ok(())
}
/// One row of the `job_events` table: a lifecycle event recorded by a
/// scheduled background job (e.g. "FINISHED" logged by the token-refresh job).
///
/// Field types mirror the sqlx query cache for
/// `SELECT * FROM job_events ...`: `event_data` and `created_at` are the
/// only nullable columns, hence the `Option` wrappers.
#[derive(Debug, Serialize, Deserialize)]
struct JobEvent {
    // Integer primary key of the event row.
    id: i64,
    // Name of the emitting job, e.g. "REFRESH_ACCESS_TOKEN".
    job_name: String,
    // Event kind, e.g. "FINISHED"; non-null in the schema.
    event_type: String,
    // Optional free-form payload attached to the event.
    event_data: Option<String>,
    // Insertion timestamp stored as TEXT; nullable per the sqlx describe
    // output, so it may be absent on old rows. NOTE(review): presumably
    // set by a DB default — confirm against the table definition.
    created_at: Option<String>,
}
#[get("/api/job_events")]
pub async fn get_job_events() -> impl Responder {
let pool = db::get_pool().await.expect("Could not get database pool");
let logs = query_as!(
JobEvent,
"SELECT * FROM job_events ORDER BY created_at DESC LIMIT 1000"
)
.fetch_all(&pool)
.await
.expect("Could not fetch job events");
serde_json::to_string(&logs).unwrap()
}

View File

@@ -58,6 +58,7 @@ async fn main() -> std::io::Result<()> {
.service(servers::get_server)
.service(servers::update_server_hostname)
.service(servers::get_server_metrics)
.service(jobs::get_job_events)
.service(
Files::new("/", "./frontend/dist")
.index_file("index.html")