Use tokio::sync::Mutex for Crawls

Tyler Hallada 2023-07-19 23:02:25 -04:00
parent d17f909312
commit e6a37703be
3 changed files with 13 additions and 14 deletions
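
Why this change: `std::sync::Mutex::lock` blocks the thread and returns a `Result` because the lock is poisoned if a holder panics, which is what the `map_err` and `expect` calls removed below were handling. `tokio::sync::Mutex::lock` is an `async fn` that yields to the runtime while waiting and never poisons, so locking the map reduces to a single `.await`. A minimal standalone sketch of the new pattern (illustrative example, not code from this commit):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::Mutex;

    #[tokio::main]
    async fn main() {
        // A shared async-aware map, the same shape as the Crawls type below.
        let crawls: Arc<Mutex<HashMap<u32, &str>>> = Arc::new(Mutex::new(HashMap::new()));

        {
            // lock() is async and infallible: no PoisonError to map_err or expect.
            let mut guard = crawls.lock().await;
            guard.insert(1, "receiver");
        } // guard dropped here, so the lock is released before any later await

        assert_eq!(crawls.lock().await.remove(&1), Some("receiver"));
    }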


@@ -133,9 +133,7 @@ pub async fn post(
     let receiver = crawl_scheduler.schedule(feed.feed_id).await;
     {
-        let mut crawls = crawls.lock().map_err(|_| {
-            AddFeedError::CreateFeedError(add_feed.url.clone(), Error::InternalServerError)
-        })?;
+        let mut crawls = crawls.lock().await;
         crawls.insert(feed.feed_id, receiver);
     }
@@ -164,7 +162,7 @@ pub async fn stream(
    State(crawls): State<Crawls>,
 ) -> Result<impl IntoResponse> {
     let receiver = {
-        let mut crawls = crawls.lock().expect("crawls lock poisoned");
+        let mut crawls = crawls.lock().await;
         crawls.remove(&id.as_uuid())
     }
     .ok_or_else(|| Error::NotFound("feed stream", id.as_uuid()))?;
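
Note that both handlers keep the guard inside a block: a tokio guard may legally be held across `.await` points, but dropping it at the end of the block releases the lock before the handler goes on to stream server-sent events. A condensed sketch of that scoped pattern (placeholder message type; the real map stores `broadcast::Receiver<CrawlSchedulerHandleMessage>`):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{broadcast, Mutex};
    use uuid::Uuid;

    // Stand-in for the Crawls alias defined in the last file of this commit.
    type Crawls = Arc<Mutex<HashMap<Uuid, broadcast::Receiver<String>>>>;

    async fn take_receiver(crawls: &Crawls, id: Uuid) -> Option<broadcast::Receiver<String>> {
        let receiver = {
            let mut map = crawls.lock().await; // guard is scoped to this block
            map.remove(&id)
        }; // guard dropped: lock released before any long-lived streaming
        receiver
    }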


@@ -2,7 +2,7 @@ use std::{
     collections::HashMap,
     net::SocketAddr,
     path::Path,
-    sync::{Arc, Mutex},
+    sync::Arc,
 };
 
 use anyhow::Result;
@@ -17,6 +17,7 @@ use notify::Watcher;
 use reqwest::Client;
 use sqlx::postgres::PgPoolOptions;
 use tokio::sync::watch::channel;
+use tokio::sync::Mutex;
 use tower::ServiceBuilder;
 use tower_http::{services::ServeDir, trace::TraceLayer};
 use tower_livereload::LiveReloadLayer;


@@ -1,27 +1,27 @@
 use std::collections::HashMap;
-use std::sync::{Arc, Mutex};
-use tokio::sync::{broadcast, watch};
+use std::sync::Arc;
+use tokio::sync::{broadcast, watch, Mutex};
 
 use axum::extract::FromRef;
 use bytes::Bytes;
+use reqwest::Client;
 use sqlx::PgPool;
 use uuid::Uuid;
-use reqwest::Client;
 
 use crate::actors::crawl_scheduler::{CrawlSchedulerHandle, CrawlSchedulerHandleMessage};
 use crate::config::Config;
 use crate::domain_locks::DomainLocks;
 
 /// A map of feed IDs to a channel receiver for the active `CrawlScheduler` running a feed crawl
 /// for that feed.
 ///
 /// Currently, the only purpose of this is to keep track of active crawls so that axum handlers can
 /// subscribe to the result of the crawl via the receiver channel which are then sent to end-users
 /// as a stream of server-sent events.
 ///
 /// This map should only contain crawls that have just been created but not yet subscribed to.
 /// Entries are only added when a user adds a feed in the UI and entries are removed by the same
 /// user once a server-sent event connection is established.
 pub type Crawls = Arc<Mutex<HashMap<Uuid, broadcast::Receiver<CrawlSchedulerHandleMessage>>>>;
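
Read together with the handlers above, the lifecycle this comment describes is: `post` schedules a crawl and parks the broadcast receiver in the map, and the same user's follow-up SSE request claims it exactly once via `remove`. A compressed end-to-end sketch (a string stands in for `CrawlSchedulerHandleMessage`; `uuid` with the `v4` feature assumed):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{broadcast, Mutex};
    use uuid::Uuid;

    type Msg = &'static str; // placeholder for CrawlSchedulerHandleMessage
    type Crawls = Arc<Mutex<HashMap<Uuid, broadcast::Receiver<Msg>>>>;

    #[tokio::main]
    async fn main() {
        let crawls: Crawls = Arc::new(Mutex::new(HashMap::new()));
        let feed_id = Uuid::new_v4();
        let (sender, receiver) = broadcast::channel::<Msg>(8);

        // post handler: record the in-flight crawl for this feed.
        crawls.lock().await.insert(feed_id, receiver);

        // stream handler: claim the receiver; a second claim would get None.
        let mut receiver = crawls
            .lock()
            .await
            .remove(&feed_id)
            .expect("no active crawl for feed");

        sender.send("crawl complete").unwrap();
        assert_eq!(receiver.recv().await.unwrap(), "crawl complete");
    }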