Add /log page that displays server log

This will eventually be behind an authorization guard so that only I
have access, but it's useful to have for monitoring and testing out
turbo streams.
This commit is contained in:
Tyler Hallada 2023-06-03 01:09:25 -04:00
parent ea236dff4e
commit 951d6d23e2
11 changed files with 141 additions and 9 deletions

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
/target /target
/logs
.env .env

13
Cargo.lock generated
View File

@ -260,6 +260,7 @@ dependencies = [
"feed-rs", "feed-rs",
"maud", "maud",
"notify", "notify",
"once_cell",
"reqwest", "reqwest",
"serde", "serde",
"serde_with", "serde_with",
@ -270,6 +271,7 @@ dependencies = [
"tower-http", "tower-http",
"tower-livereload", "tower-livereload",
"tracing", "tracing",
"tracing-appender",
"tracing-subscriber", "tracing-subscriber",
"validator", "validator",
] ]
@ -2139,6 +2141,17 @@ dependencies = [
"tracing-core", "tracing-core",
] ]
[[package]]
name = "tracing-appender"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e"
dependencies = [
"crossbeam-channel",
"time 0.3.21",
"tracing-subscriber",
]
[[package]] [[package]]
name = "tracing-attributes" name = "tracing-attributes"
version = "0.1.24" version = "0.1.24"

View File

@ -19,6 +19,7 @@ dotenvy = "0.15"
feed-rs = "1.3" feed-rs = "1.3"
maud = { version = "0.25", features = ["axum"] } maud = { version = "0.25", features = ["axum"] }
notify = "6" notify = "6"
once_cell = "1.17"
reqwest = { version = "0.11", features = ["json"] } reqwest = { version = "0.11", features = ["json"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_with = "3" serde_with = "3"
@ -35,5 +36,6 @@ tower = "0.4"
tower-livereload = "0.7" tower-livereload = "0.7"
tower-http = { version = "0.4", features = ["trace"] } tower-http = { version = "0.4", features = ["trace"] }
tracing = "0.1" tracing = "0.1"
tracing-appender = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
validator = { version = "0.16", features = ["derive"] } validator = { version = "0.16", features = ["derive"] }

View File

@ -34,6 +34,7 @@ PORT=3000
DATABASE_URL=postgresql://crawlnicle:<password>@localhost/crawlnicle DATABASE_URL=postgresql://crawlnicle:<password>@localhost/crawlnicle
DATABASE_MAX_CONNECTIONS=5 DATABASE_MAX_CONNECTIONS=5
TITLE=crawlnicle TITLE=crawlnicle
MAX_MEM_LOG_SIZE=1000000
``` ```
4. Install 4. Install

View File

@ -7,6 +7,7 @@ pub struct Config {
pub host: String, pub host: String,
pub port: u16, pub port: u16,
pub title: String, pub title: String,
pub max_mem_log_size: usize,
} }
impl Config { impl Config {
@ -16,6 +17,7 @@ impl Config {
let host = std::env::var("HOST").context("HOST not set")?; let host = std::env::var("HOST").context("HOST not set")?;
let port = std::env::var("PORT").context("PORT not set")?.parse()?; let port = std::env::var("PORT").context("PORT not set")?.parse()?;
let title = std::env::var("TITLE").context("TITLE not set")?; let title = std::env::var("TITLE").context("TITLE not set")?;
let max_mem_log_size = std::env::var("MAX_MEM_LOG_SIZE").context("MAX_MEM_LOG_SIZE not set")?.parse()?;
Ok(Config { Ok(Config {
database_url, database_url,
@ -23,6 +25,7 @@ impl Config {
host, host,
port, port,
title, title,
max_mem_log_size,
}) })
} }
} }

13
src/handlers/log.rs Normal file
View File

@ -0,0 +1,13 @@
use axum::response::Response;
use maud::html;
use crate::error::Result;
use crate::partials::layout::Layout;
use crate::log::MEM_LOG;
/// GET /log — render the in-memory log buffer as a preformatted page.
///
/// Reads the shared `MEM_LOG` ring of recent log bytes populated by the
/// tracing layer set up in `crate::log::init_tracing`.
pub async fn get(layout: Layout) -> Result<Response> {
    let mem_buf = MEM_LOG.lock().unwrap();
    // A VecDeque's storage may wrap around, so both slices must be rendered;
    // using only `.as_slices().0` would silently drop the older half.
    let (front, back) = mem_buf.as_slices();
    let mut log = String::with_capacity(mem_buf.len());
    // `from_utf8_lossy` instead of `from_utf8(...).unwrap()`: front-truncation
    // can split a multi-byte UTF-8 character, which must not panic the handler.
    log.push_str(&String::from_utf8_lossy(front));
    log.push_str(&String::from_utf8_lossy(back));
    drop(mem_buf); // release the log lock before rendering
    Ok(layout.render(html! {
        pre { (log) }
    }))
}

View File

@ -2,3 +2,4 @@ pub mod api;
pub mod entry; pub mod entry;
pub mod home; pub mod home;
pub mod feeds; pub mod feeds;
pub mod log;

View File

@ -2,6 +2,7 @@ pub mod config;
pub mod error; pub mod error;
pub mod handlers; pub mod handlers;
pub mod jobs; pub mod jobs;
pub mod log;
pub mod models; pub mod models;
pub mod partials; pub mod partials;
pub mod state; pub mod state;

94
src/log.rs Normal file
View File

@ -0,0 +1,94 @@
use std::sync::Mutex;
use std::{io::Write, collections::VecDeque};
use anyhow::Result;
use once_cell::sync::Lazy;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt::format, EnvFilter};
use crate::config::Config;
/// A shared in-memory buffer to store log bytes
///
/// Written by the tracing layer installed in `init_tracing` (through
/// `LimitedInMemoryBuffer`) and read by the `/log` handler to display
/// recent log output.
pub static MEM_LOG: Lazy<Mutex<VecDeque<u8>>> = Lazy::new(|| Mutex::new(VecDeque::new()));
/// A `Writer` to a shared static in-memory buffer that stores bytes up until `max` bytes, at which
/// point it will truncate the buffer from the front up to the first newline byte `\n` within the
/// size limit.
///
/// This is useful for storing the last emitted log lines of an application in-memory without
/// needing to worry about the memory growing infinitely large.
///
/// `LimitedInMemoryBuffer` does not guarantee that the memory usage is less than `max`.
/// `VecDeque`'s capacity may exceed `max` and it will only check and truncate the size of the
/// internal buffer *before* writing to it. It will continue to write, even if the size of the line
/// to write will make the buffer exceed `max`.
struct LimitedInMemoryBuffer {
    // Shared destination buffer; `'static` so the writer can be handed to the
    // non-blocking tracing-appender worker thread.
    pub buf: &'static Mutex<VecDeque<u8>>,
    // Soft byte limit enforced by `truncate` before each write.
    max: usize,
}
impl LimitedInMemoryBuffer {
    /// Create a writer over `buf` that trims the front of the buffer whenever
    /// it grows beyond `max` bytes.
    fn new(buf: &'static Mutex<VecDeque<u8>>, max: usize) -> Self {
        Self { buf, max }
    }

    /// Truncate the buffer to max bytes plus bytes before the closest newline
    /// starting from the front
    fn truncate(&mut self) {
        let mut buf = self.buf.lock().unwrap();
        let len = buf.len();
        if len > self.max {
            // Shed the oldest excess bytes, then drop the remainder of the
            // partial line so the buffer starts at a line boundary.
            drop(buf.drain(..len - self.max));
            let newline_at = buf.iter().position(|&b| b == b'\n').unwrap_or(buf.len());
            drop(buf.drain(..newline_at));
        }
    }
}
impl Write for LimitedInMemoryBuffer {
    /// Append `buf` to the shared buffer, first trimming it back under the
    /// size cap; the write itself may still push the buffer past `max`.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.truncate();
        self.buf.lock().unwrap().write(buf)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.buf.lock().unwrap().flush()
    }
}
/// Install the global tracing subscriber with three outputs: stdout, an
/// hourly-rotated file under `./logs`, and the in-memory `MEM_LOG` buffer
/// served by the `/log` page.
///
/// Returns the two non-blocking worker guards; the caller must keep them
/// alive for the lifetime of the program or buffered log lines are dropped.
pub fn init_tracing(config: &Config) -> Result<(WorkerGuard, WorkerGuard)> {
    // Hourly-rotated log files under ./logs, written off-thread.
    let (file_writer, file_writer_guard) =
        tracing_appender::non_blocking(tracing_appender::rolling::hourly("./logs", "log"));
    // Size-capped in-memory buffer of recent log bytes, also written off-thread.
    let (mem_writer, mem_writer_guard) = tracing_appender::non_blocking(
        LimitedInMemoryBuffer::new(&MEM_LOG, config.max_mem_log_size),
    );

    tracing_subscriber::registry()
        .with(EnvFilter::from_default_env())
        .with(tracing_subscriber::fmt::layer())
        .with(
            // ANSI escapes disabled: the file is read as plain text.
            tracing_subscriber::fmt::layer()
                .with_writer(file_writer)
                .with_ansi(false)
                .fmt_fields(format::PrettyFields::new().with_ansi(false)),
        )
        .with(
            // ANSI escapes disabled: the buffer is rendered inside a <pre> tag.
            tracing_subscriber::fmt::layer()
                .with_writer(mem_writer)
                .with_ansi(false)
                .fmt_fields(format::PrettyFields::new().with_ansi(false)),
        )
        .init();

    Ok((file_writer_guard, mem_writer_guard))
}

View File

@ -9,21 +9,22 @@ use dotenvy::dotenv;
use notify::Watcher; use notify::Watcher;
use sqlx::postgres::PgPoolOptions; use sqlx::postgres::PgPoolOptions;
use tower::ServiceBuilder; use tower::ServiceBuilder;
use tower_livereload::LiveReloadLayer;
use tower_http::trace::TraceLayer; use tower_http::trace::TraceLayer;
use tower_livereload::LiveReloadLayer;
use tracing::debug; use tracing::debug;
use lib::config; use lib::config::Config;
use lib::handlers; use lib::handlers;
use lib::state::AppState; use lib::state::AppState;
use lib::log::init_tracing;
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
dotenv().ok(); dotenv().ok();
let config = config::Config::new()?; let config = Config::new()?;
tracing_subscriber::fmt::init(); let _guards = init_tracing(&config)?;
let pool = PgPoolOptions::new() let pool = PgPoolOptions::new()
.max_connections(config.database_max_connections) .max_connections(config.database_max_connections)
@ -43,10 +44,8 @@ async fn main() -> Result<()> {
.route("/", get(handlers::home::get)) .route("/", get(handlers::home::get))
.route("/feeds", get(handlers::feeds::get)) .route("/feeds", get(handlers::feeds::get))
.route("/entry/:id", get(handlers::entry::get)) .route("/entry/:id", get(handlers::entry::get))
.with_state(AppState { .route("/log", get(handlers::log::get))
pool, .with_state(AppState { pool, config })
config,
})
.layer(ServiceBuilder::new().layer(TraceLayer::new_for_http())); .layer(ServiceBuilder::new().layer(TraceLayer::new_for_http()));
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
@ -54,7 +53,10 @@ async fn main() -> Result<()> {
let livereload = LiveReloadLayer::new(); let livereload = LiveReloadLayer::new();
let reloader = livereload.reloader(); let reloader = livereload.reloader();
let mut watcher = notify::recommended_watcher(move |_| reloader.reload())?; let mut watcher = notify::recommended_watcher(move |_| reloader.reload())?;
watcher.watch(Path::new("target/debug/crawlnicle"), notify::RecursiveMode::Recursive)?; watcher.watch(
Path::new("target/debug/crawlnicle"),
notify::RecursiveMode::Recursive,
)?;
app = app.layer(livereload); app = app.layer(livereload);
} }

View File

@ -7,6 +7,7 @@ pub fn header() -> Markup {
h1 { a href="/" data-turbo-frame="main" { "crawlnicle" } } h1 { a href="/" data-turbo-frame="main" { "crawlnicle" } }
ul { ul {
li { a href="/feeds" data-turbo-frame="main" { "feeds" } } li { a href="/feeds" data-turbo-frame="main" { "feeds" } }
li { a href="/log" data-turbo-frame="main" { "log" } }
} }
} }
} }