Use tokio::sync::Mutex for Crawls

This commit is contained in:
Tyler Hallada 2023-07-19 23:02:25 -04:00
parent d17f909312
commit e6a37703be
3 changed files with 13 additions and 14 deletions

View File

@ -133,9 +133,7 @@ pub async fn post(
let receiver = crawl_scheduler.schedule(feed.feed_id).await;
{
let mut crawls = crawls.lock().map_err(|_| {
AddFeedError::CreateFeedError(add_feed.url.clone(), Error::InternalServerError)
})?;
let mut crawls = crawls.lock().await;
crawls.insert(feed.feed_id, receiver);
}
@ -164,7 +162,7 @@ pub async fn stream(
State(crawls): State<Crawls>,
) -> Result<impl IntoResponse> {
let receiver = {
let mut crawls = crawls.lock().expect("crawls lock poisoned");
let mut crawls = crawls.lock().await;
crawls.remove(&id.as_uuid())
}
.ok_or_else(|| Error::NotFound("feed stream", id.as_uuid()))?;

View File

@ -2,7 +2,7 @@ use std::{
collections::HashMap,
net::SocketAddr,
path::Path,
sync::{Arc, Mutex},
sync::Arc,
};
use anyhow::Result;
@ -17,6 +17,7 @@ use notify::Watcher;
use reqwest::Client;
use sqlx::postgres::PgPoolOptions;
use tokio::sync::watch::channel;
use tokio::sync::Mutex;
use tower::ServiceBuilder;
use tower_http::{services::ServeDir, trace::TraceLayer};
use tower_livereload::LiveReloadLayer;

View File

@ -1,27 +1,27 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use tokio::sync::{broadcast, watch};
use tokio::sync::{broadcast, watch, Mutex};
use axum::extract::FromRef;
use bytes::Bytes;
use reqwest::Client;
use sqlx::PgPool;
use uuid::Uuid;
use reqwest::Client;
use crate::actors::crawl_scheduler::{CrawlSchedulerHandle, CrawlSchedulerHandleMessage};
use crate::config::Config;
use crate::domain_locks::DomainLocks;
/// A map of feed IDs to a channel receiver for the active `CrawlScheduler` running a feed crawl
/// A map of feed IDs to a channel receiver for the active `CrawlScheduler` running a feed crawl
/// for that feed.
///
/// Currently, the only purpose of this is to keep track of active crawls so that axum handlers can
/// subscribe to the result of the crawl via the receiver channel which are then sent to end-users
/// Currently, the only purpose of this is to keep track of active crawls so that axum handlers can
/// subscribe to the result of the crawl via the receiver channel which are then sent to end-users
/// as a stream of server-sent events.
///
/// This map should only contain crawls that have just been created but not yet subscribed to.
/// Entries are only added when a user adds a feed in the UI and entries are removed by the same
///
/// This map should only contain crawls that have just been created but not yet subscribed to.
/// Entries are only added when a user adds a feed in the UI and entries are removed by the same
/// user once a server-sent event connection is established.
pub type Crawls = Arc<Mutex<HashMap<Uuid, broadcast::Receiver<CrawlSchedulerHandleMessage>>>>;