Compare commits
No commits in common. "master" and "refactor-section-runner" have entirely different histories.
master ... refactor-section-runner

.gitignore (vendored): 14 lines changed
@@ -1,14 +1,4 @@
# Cargo/Rust
/target
Cargo.lock

# Visual studio code
.vscode

# Sqlite databases
/.vscode
/*.db
*.db-shm
*.db-wal

# Config file
/sprinklers_rs.json
Cargo.lock
Cargo.toml: 30 lines changed
@@ -1,10 +1,22 @@
[workspace]
[package]
name = "sprinklers_rs"
version = "0.1.0"
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
edition = "2018"

members = [
    "sprinklers_core",
    "sprinklers_database",
    "sprinklers_actors",
    "sprinklers_mqtt",
    "sprinklers_linux",
    "sprinklers_rs"
]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
rusqlite = "0.23.1"
color-eyre = "0.5.1"
eyre = "0.6.0"
thiserror = "1.0.20"
tokio = { version = "0.2.22", features = ["rt-core", "time", "sync", "macros", "test-util"] }
tracing = { version = "0.1.19", features = ["log"] }
tracing-futures = "0.2.4"
pin-project = "0.4.23"

[dependencies.tracing-subscriber]
version = "0.2.11"
default-features = false
features = ["registry", "fmt", "env-filter", "ansi"]
@@ -1,37 +0,0 @@
[package]
name = "sprinklers_actors"
version = "0.1.0"
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
sprinklers_core = { path = "../sprinklers_core" }
actix = { version = "0.10.0", default-features = false }
thiserror = "1.0.20"
tracing = "0.1.19"
chrono = { version = "0.4.15" }
serde = { version = "1.0.116", features = ["derive"] }
im = "15.0.0"
eyre = "0.6.0"

[dependencies.tokio]
version = "0.2.22"
default-features = false
features = []

[dependencies.futures-util]
version = "0.3.5"
default-features = false
features = ["std", "async-await", "sink"]

[dev-dependencies]
actix-rt = "1.1.1"
tokio = { version = "0.2.22", features = ["test-util"] }
assert_matches = "1.3.0"

[dev-dependencies.tracing-subscriber]
version = "0.2.11"
default-features = false
features = ["registry"]
@@ -1,10 +0,0 @@
pub mod program_runner;
pub mod state_manager;
pub mod zone_runner;

#[cfg(test)]
mod trace_listeners;

pub use program_runner::ProgramRunner;
pub use state_manager::StateManager;
pub use zone_runner::ZoneRunner;
File diff suppressed because it is too large
@@ -1,72 +0,0 @@
use sprinklers_core::model::{ProgramId, ProgramRef, ProgramUpdateData, Programs, Zones};
use thiserror::Error;
use tokio::sync::{mpsc, oneshot, watch};

#[derive(Debug)]
pub enum Request {
    UpdateProgram {
        id: ProgramId,
        update: ProgramUpdateData,
        resp_tx: oneshot::Sender<Result<ProgramRef>>,
    },
}

#[derive(Clone)]
pub struct StateManager {
    request_tx: mpsc::Sender<Request>,
    zones_watch: watch::Receiver<Zones>,
    programs_watch: watch::Receiver<Programs>,
}

#[derive(Debug, Error)]
pub enum StateError {
    #[error("no such program: {0}")]
    NoSuchProgram(ProgramId),
    #[error("internal error: {0}")]
    Other(
        #[from]
        #[source]
        eyre::Report,
    ),
}

pub type Result<T, E = StateError> = std::result::Result<T, E>;

impl StateManager {
    pub fn new(
        request_tx: mpsc::Sender<Request>,
        zones_watch: watch::Receiver<Zones>,
        programs_watch: watch::Receiver<Programs>,
    ) -> Self {
        Self {
            request_tx,
            zones_watch,
            programs_watch,
        }
    }

    pub async fn update_program(
        &mut self,
        id: ProgramId,
        update: ProgramUpdateData,
    ) -> Result<ProgramRef> {
        let (resp_tx, resp_rx) = oneshot::channel();
        self.request_tx
            .send(Request::UpdateProgram {
                id,
                update,
                resp_tx,
            })
            .await
            .map_err(eyre::Report::from)?;
        resp_rx.await.map_err(eyre::Report::from)?
    }

    pub fn get_zones(&self) -> watch::Receiver<Zones> {
        self.zones_watch.clone()
    }

    pub fn get_programs(&self) -> watch::Receiver<Programs> {
        self.programs_watch.clone()
    }
}
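StateManager above is only the client half of a request/response channel: it pushes Request values down an mpsc sender and waits for the reply on the oneshot sender embedded in each request. The following is a minimal sketch of that wiring, not part of this diff; it assumes the StateManager, Request and StateError items above are in scope, and the error reply is just a placeholder.

use futures_util::join;
use sprinklers_core::model::{ProgramUpdateData, Programs, Zones};
use tokio::sync::{mpsc, watch};

async fn state_manager_sketch() {
    let (request_tx, mut request_rx) = mpsc::channel(8);
    let (_zones_tx, zones_rx) = watch::channel(Zones::default());
    let (_programs_tx, programs_rx) = watch::channel(Programs::default());
    let mut state_manager = StateManager::new(request_tx, zones_rx, programs_rx);

    // Client side: send an update request and wait for the oneshot reply.
    let client = state_manager.update_program(1, ProgramUpdateData::default());

    // Owning side: answer the single request. A real owner would loop forever
    // and apply the update to its Programs map instead of replying with an error.
    let server = async {
        if let Some(Request::UpdateProgram { id, resp_tx, .. }) = request_rx.recv().await {
            let _ = resp_tx.send(Err(StateError::NoSuchProgram(id)));
        }
    };

    let (result, _) = join!(client, server);
    assert!(matches!(result, Err(StateError::NoSuchProgram(1))));
}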
@@ -1,891 +0,0 @@
|
||||
use sprinklers_core::model::{ZoneId, ZoneRef};
|
||||
use sprinklers_core::zone_interface::ZoneInterface;
|
||||
|
||||
use actix::{
|
||||
Actor, ActorContext, Addr, AsyncContext, Handler, Message, MessageResult, SpawnHandle,
|
||||
};
|
||||
use futures_util::TryFutureExt;
|
||||
use std::{
|
||||
future::Future,
|
||||
mem::swap,
|
||||
sync::{
|
||||
atomic::{AtomicI32, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use thiserror::Error;
|
||||
use tokio::{
|
||||
sync::{broadcast, watch},
|
||||
time::Instant,
|
||||
};
|
||||
use tracing::{debug, trace, warn};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, serde::Deserialize, serde::Serialize)]
|
||||
pub struct ZoneRunHandle(i32);
|
||||
|
||||
impl ZoneRunHandle {
|
||||
pub fn into_inner(self) -> i32 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ZoneEvent {
|
||||
RunStart(ZoneRunHandle, ZoneRef),
|
||||
RunFinish(ZoneRunHandle, ZoneRef),
|
||||
RunPause(ZoneRunHandle, ZoneRef),
|
||||
RunUnpause(ZoneRunHandle, ZoneRef),
|
||||
RunCancel(ZoneRunHandle, ZoneRef),
|
||||
RunnerPause,
|
||||
RunnerUnpause,
|
||||
}
|
||||
|
||||
pub type ZoneEventRecv = broadcast::Receiver<ZoneEvent>;
|
||||
type ZoneEventSend = broadcast::Sender<ZoneEvent>;
|
||||
|
||||
const EVENT_CAPACITY: usize = 8;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum ZoneRunState {
|
||||
Waiting,
|
||||
Running {
|
||||
start_time: Instant,
|
||||
},
|
||||
Finished,
|
||||
Cancelled,
|
||||
Paused {
|
||||
start_time: Instant,
|
||||
pause_time: Instant,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ZoneRun {
|
||||
pub handle: ZoneRunHandle,
|
||||
pub zone: ZoneRef,
|
||||
pub duration: Duration,
|
||||
pub total_duration: Duration,
|
||||
pub state: ZoneRunState,
|
||||
}
|
||||
|
||||
impl ZoneRun {
|
||||
fn new(handle: ZoneRunHandle, zone: ZoneRef, duration: Duration) -> Self {
|
||||
Self {
|
||||
handle,
|
||||
zone,
|
||||
duration,
|
||||
total_duration: duration,
|
||||
state: ZoneRunState::Waiting,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_running(&self) -> bool {
|
||||
matches!(self.state, ZoneRunState::Running{..})
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn is_paused(&self) -> bool {
|
||||
matches!(self.state, ZoneRunState::Paused{..})
|
||||
}
|
||||
}
|
||||
|
||||
pub type ZoneRunQueue = im::Vector<Arc<ZoneRun>>;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ZoneRunnerState {
|
||||
pub run_queue: ZoneRunQueue,
|
||||
pub paused: bool,
|
||||
}
|
||||
|
||||
impl Default for ZoneRunnerState {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
run_queue: ZoneRunQueue::default(),
|
||||
paused: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type ZoneRunnerStateRecv = watch::Receiver<ZoneRunnerState>;
|
||||
|
||||
struct ZoneRunnerInner {
|
||||
interface: Arc<dyn ZoneInterface>,
|
||||
event_send: Option<ZoneEventSend>,
|
||||
state_send: watch::Sender<ZoneRunnerState>,
|
||||
delay_future: Option<SpawnHandle>,
|
||||
did_change: bool,
|
||||
}
|
||||
|
||||
impl ZoneRunnerInner {
|
||||
fn send_event(&mut self, event: ZoneEvent) {
|
||||
if let Some(event_send) = &mut self.event_send {
|
||||
match event_send.send(event) {
|
||||
Ok(_) => {}
|
||||
Err(_closed) => {
|
||||
self.event_send = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn subscribe_event(&mut self) -> ZoneEventRecv {
|
||||
match &mut self.event_send {
|
||||
Some(event_send) => event_send.subscribe(),
|
||||
None => {
|
||||
let (event_send, event_recv) = broadcast::channel(EVENT_CAPACITY);
|
||||
self.event_send = Some(event_send);
|
||||
event_recv
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn start_run(&mut self, run: &mut Arc<ZoneRun>) {
|
||||
use ZoneRunState::*;
|
||||
let run = Arc::make_mut(run);
|
||||
debug!(zone_id = run.zone.id, "starting running zone");
|
||||
self.interface.set_zone_state(run.zone.interface_id, true);
|
||||
run.state = Running {
|
||||
start_time: Instant::now(),
|
||||
};
|
||||
self.send_event(ZoneEvent::RunStart(run.handle.clone(), run.zone.clone()));
|
||||
self.did_change = true;
|
||||
}
|
||||
|
||||
fn finish_run(&mut self, run: &mut Arc<ZoneRun>) {
|
||||
let run = Arc::make_mut(run);
|
||||
if run.is_running() {
|
||||
debug!(zone_id = run.zone.id, "finished running zone");
|
||||
self.interface.set_zone_state(run.zone.interface_id, false);
|
||||
run.state = ZoneRunState::Finished;
|
||||
self.send_event(ZoneEvent::RunFinish(run.handle.clone(), run.zone.clone()));
|
||||
self.did_change = true;
|
||||
} else {
|
||||
warn!(
|
||||
zone_id = run.zone.id,
|
||||
state = debug(&run.state),
|
||||
"cannot finish run which is not running"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn cancel_run(&mut self, run: &mut Arc<ZoneRun>) -> bool {
|
||||
let run = Arc::make_mut(run);
|
||||
if run.is_running() {
|
||||
debug!(zone_id = run.zone.id, "cancelling running zone");
|
||||
self.interface.set_zone_state(run.zone.interface_id, false);
|
||||
}
|
||||
if run.state != ZoneRunState::Cancelled {
|
||||
run.state = ZoneRunState::Cancelled;
|
||||
self.send_event(ZoneEvent::RunCancel(run.handle.clone(), run.zone.clone()));
|
||||
self.did_change = true;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn pause_run(&mut self, run: &mut Arc<ZoneRun>) {
|
||||
use ZoneRunState::*;
|
||||
let run = Arc::make_mut(run);
|
||||
let new_state = match run.state {
|
||||
Running { start_time } => {
|
||||
debug!(zone_id = run.zone.id, "pausing running zone");
|
||||
self.interface.set_zone_state(run.zone.interface_id, false);
|
||||
Paused {
|
||||
start_time,
|
||||
pause_time: Instant::now(),
|
||||
}
|
||||
}
|
||||
Waiting => {
|
||||
debug!(zone_id = run.zone.id, "pausing waiting zone");
|
||||
Paused {
|
||||
start_time: Instant::now(),
|
||||
pause_time: Instant::now(),
|
||||
}
|
||||
}
|
||||
Finished | Cancelled | Paused { .. } => {
|
||||
return;
|
||||
}
|
||||
};
|
||||
run.state = new_state;
|
||||
self.send_event(ZoneEvent::RunPause(run.handle.clone(), run.zone.clone()));
|
||||
self.did_change = true;
|
||||
}
|
||||
|
||||
fn unpause_run(&mut self, run: &mut Arc<ZoneRun>) {
|
||||
use ZoneRunState::*;
|
||||
let run = Arc::make_mut(run);
|
||||
match run.state {
|
||||
Paused {
|
||||
start_time,
|
||||
pause_time,
|
||||
} => {
|
||||
debug!(zone_id = run.zone.id, "unpausing zone");
|
||||
self.interface.set_zone_state(run.zone.interface_id, true);
|
||||
run.state = Running {
|
||||
start_time: Instant::now(),
|
||||
};
|
||||
let ran_for = pause_time - start_time;
|
||||
run.duration -= ran_for;
|
||||
self.send_event(ZoneEvent::RunUnpause(run.handle.clone(), run.zone.clone()));
|
||||
}
|
||||
Waiting | Finished | Cancelled | Running { .. } => {
|
||||
warn!(
|
||||
zone_id = run.zone.id,
|
||||
state = debug(&run.state),
|
||||
"can only unpause paused zone"
|
||||
);
|
||||
}
|
||||
}
|
||||
self.did_change = true;
|
||||
}
|
||||
|
||||
fn process_after_delay(
|
||||
&mut self,
|
||||
after: Duration,
|
||||
ctx: &mut <ZoneRunnerActor as Actor>::Context,
|
||||
) {
|
||||
let delay_future = ctx.notify_later(Process, after);
|
||||
if let Some(old_future) = self.delay_future.replace(delay_future) {
|
||||
ctx.cancel_future(old_future);
|
||||
}
|
||||
}
|
||||
|
||||
fn cancel_process(&mut self, ctx: &mut <ZoneRunnerActor as Actor>::Context) {
|
||||
if let Some(old_future) = self.delay_future.take() {
|
||||
ctx.cancel_future(old_future);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ZoneRunnerActor {
|
||||
state: ZoneRunnerState,
|
||||
inner: ZoneRunnerInner,
|
||||
}
|
||||
|
||||
impl Actor for ZoneRunnerActor {
|
||||
type Context = actix::Context<Self>;
|
||||
|
||||
fn started(&mut self, _ctx: &mut Self::Context) {
|
||||
trace!("zone_runner starting");
|
||||
for i in 0..self.inner.interface.num_zones() {
|
||||
self.inner.interface.set_zone_state(i, false);
|
||||
}
|
||||
}
|
||||
|
||||
fn stopped(&mut self, _ctx: &mut Self::Context) {
|
||||
trace!("zone_runner stopped");
|
||||
for i in 0..self.inner.interface.num_zones() {
|
||||
self.inner.interface.set_zone_state(i, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "()")]
|
||||
struct Quit;
|
||||
|
||||
impl Handler<Quit> for ZoneRunnerActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, _msg: Quit, ctx: &mut Self::Context) -> Self::Result {
|
||||
ctx.stop();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "()")]
|
||||
struct QueueRun(ZoneRunHandle, ZoneRef, Duration);
|
||||
|
||||
impl Handler<QueueRun> for ZoneRunnerActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, msg: QueueRun, ctx: &mut Self::Context) -> Self::Result {
|
||||
let QueueRun(handle, zone, duration) = msg;
|
||||
|
||||
let run: Arc<ZoneRun> = ZoneRun::new(handle, zone, duration).into();
|
||||
self.state.run_queue.push_back(run);
|
||||
self.inner.did_change = true;
|
||||
|
||||
ctx.notify(Process);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "bool")]
|
||||
struct CancelRun(ZoneRunHandle);
|
||||
|
||||
impl Handler<CancelRun> for ZoneRunnerActor {
|
||||
type Result = bool;
|
||||
|
||||
fn handle(&mut self, msg: CancelRun, ctx: &mut Self::Context) -> Self::Result {
|
||||
let CancelRun(handle) = msg;
|
||||
let mut cancelled = false;
|
||||
for run in self
|
||||
.state
|
||||
.run_queue
|
||||
.iter_mut()
|
||||
.filter(|run| run.handle == handle)
|
||||
{
|
||||
trace!(handle = handle.0, "cancelling run by handle");
|
||||
cancelled = self.inner.cancel_run(run);
|
||||
}
|
||||
ctx.notify(Process);
|
||||
cancelled
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "usize")]
|
||||
struct CancelByZone(ZoneId);
|
||||
|
||||
impl Handler<CancelByZone> for ZoneRunnerActor {
|
||||
type Result = usize;
|
||||
|
||||
fn handle(&mut self, msg: CancelByZone, ctx: &mut Self::Context) -> Self::Result {
|
||||
let CancelByZone(zone_id) = msg;
|
||||
let mut count = 0_usize;
|
||||
for run in self
|
||||
.state
|
||||
.run_queue
|
||||
.iter_mut()
|
||||
.filter(|run| run.zone.id == zone_id)
|
||||
{
|
||||
trace!(handle = run.handle.0, zone_id, "cancelling run by zone");
|
||||
if self.inner.cancel_run(run) {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
ctx.notify(Process);
|
||||
count
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "usize")]
|
||||
struct CancelAll;
|
||||
|
||||
impl Handler<CancelAll> for ZoneRunnerActor {
|
||||
type Result = usize;
|
||||
|
||||
fn handle(&mut self, _msg: CancelAll, ctx: &mut Self::Context) -> Self::Result {
|
||||
let mut old_runs = ZoneRunQueue::new();
|
||||
swap(&mut old_runs, &mut self.state.run_queue);
|
||||
trace!(count = old_runs.len(), "cancelling all runs");
|
||||
let mut count = 0usize;
|
||||
for mut run in old_runs {
|
||||
if self.inner.cancel_run(&mut run) {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
ctx.notify(Process);
|
||||
count
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "()")]
|
||||
struct SetPaused(bool);
|
||||
|
||||
impl Handler<SetPaused> for ZoneRunnerActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, msg: SetPaused, ctx: &mut Self::Context) -> Self::Result {
|
||||
let SetPaused(pause) = msg;
|
||||
if pause != self.state.paused {
|
||||
if pause {
|
||||
self.state.paused = true;
|
||||
self.inner.send_event(ZoneEvent::RunnerPause);
|
||||
} else {
|
||||
self.state.paused = false;
|
||||
self.inner.send_event(ZoneEvent::RunnerUnpause);
|
||||
}
|
||||
self.inner.did_change = true;
|
||||
ctx.notify(Process);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "ZoneEventRecv")]
|
||||
struct Subscribe;
|
||||
|
||||
impl Handler<Subscribe> for ZoneRunnerActor {
|
||||
type Result = MessageResult<Subscribe>;
|
||||
|
||||
fn handle(&mut self, _msg: Subscribe, _ctx: &mut Self::Context) -> Self::Result {
|
||||
let event_recv = self.inner.subscribe_event();
|
||||
MessageResult(event_recv)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Message, Debug, Clone)]
|
||||
#[rtype(result = "()")]
|
||||
struct Process;
|
||||
|
||||
impl Handler<Process> for ZoneRunnerActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, _msg: Process, ctx: &mut Self::Context) -> Self::Result {
|
||||
self.process(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
impl ZoneRunnerActor {
|
||||
fn new(interface: Arc<dyn ZoneInterface>, state_send: watch::Sender<ZoneRunnerState>) -> Self {
|
||||
Self {
|
||||
state: ZoneRunnerState::default(),
|
||||
inner: ZoneRunnerInner {
|
||||
interface,
|
||||
event_send: None,
|
||||
state_send,
|
||||
delay_future: None,
|
||||
did_change: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn process_queue(&mut self, ctx: &mut actix::Context<Self>) {
|
||||
use ZoneRunState::*;
|
||||
let state = &mut self.state;
|
||||
while let Some(current_run) = state.run_queue.front_mut() {
|
||||
let run_finished = match (&current_run.state, state.paused) {
|
||||
(Waiting, false) => {
|
||||
self.inner.start_run(current_run);
|
||||
self.inner.process_after_delay(current_run.duration, ctx);
|
||||
false
|
||||
}
|
||||
(Running { start_time }, false) => {
|
||||
let time_to_finish = start_time.elapsed() >= current_run.duration;
|
||||
if time_to_finish {
|
||||
self.inner.finish_run(current_run);
|
||||
self.inner.cancel_process(ctx);
|
||||
}
|
||||
time_to_finish
|
||||
}
|
||||
(Paused { .. }, false) => {
|
||||
self.inner.unpause_run(current_run);
|
||||
self.inner.process_after_delay(current_run.duration, ctx);
|
||||
false
|
||||
}
|
||||
(Waiting, true) => {
|
||||
self.inner.pause_run(current_run);
|
||||
false
|
||||
}
|
||||
(Running { .. }, true) => {
|
||||
self.inner.pause_run(current_run);
|
||||
self.inner.cancel_process(ctx);
|
||||
false
|
||||
}
|
||||
(Paused { .. }, true) => false,
|
||||
(Cancelled, _) | (Finished, _) => true,
|
||||
};
|
||||
|
||||
if run_finished {
|
||||
state.run_queue.pop_front();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process(&mut self, ctx: &mut actix::Context<Self>) {
|
||||
self.process_queue(ctx);
|
||||
|
||||
if self.inner.did_change {
|
||||
let _ = self.inner.state_send.broadcast(self.state.clone());
|
||||
self.inner.did_change = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Error)]
|
||||
#[error("error communicating with ZoneRunner: {0}")]
|
||||
pub struct Error(#[from] actix::MailboxError);
|
||||
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ZoneRunner {
|
||||
state_recv: ZoneRunnerStateRecv,
|
||||
addr: Addr<ZoneRunnerActor>,
|
||||
next_run_id: Arc<AtomicI32>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl ZoneRunner {
|
||||
pub fn new(interface: Arc<dyn ZoneInterface>) -> Self {
|
||||
let (state_send, state_recv) = watch::channel(ZoneRunnerState::default());
|
||||
let addr = ZoneRunnerActor::new(interface, state_send).start();
|
||||
Self {
|
||||
state_recv,
|
||||
addr,
|
||||
next_run_id: Arc::new(AtomicI32::new(1)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn quit(&mut self) -> impl Future<Output = Result<()>> {
|
||||
self.addr.send(Quit).map_err(From::from)
|
||||
}
|
||||
|
||||
fn queue_run_inner(&mut self, zone: ZoneRef, duration: Duration) -> (QueueRun, ZoneRunHandle) {
|
||||
let run_id = self.next_run_id.fetch_add(1, Ordering::SeqCst);
|
||||
let handle = ZoneRunHandle(run_id);
|
||||
(QueueRun(handle.clone(), zone, duration), handle)
|
||||
}
|
||||
|
||||
pub fn do_queue_run(&mut self, zone: ZoneRef, duration: Duration) -> ZoneRunHandle {
|
||||
let (queue_run, handle) = self.queue_run_inner(zone, duration);
|
||||
self.addr.do_send(queue_run);
|
||||
handle
|
||||
}
|
||||
|
||||
pub fn queue_run(
|
||||
&mut self,
|
||||
zone: ZoneRef,
|
||||
duration: Duration,
|
||||
) -> impl Future<Output = Result<ZoneRunHandle>> {
|
||||
let (queue_run, handle) = self.queue_run_inner(zone, duration);
|
||||
self.addr
|
||||
.send(queue_run)
|
||||
.map_err(From::from)
|
||||
.map_ok(move |_| handle)
|
||||
}
|
||||
|
||||
pub fn do_cancel_run(&mut self, handle: ZoneRunHandle) {
|
||||
self.addr.do_send(CancelRun(handle))
|
||||
}
|
||||
|
||||
pub fn cancel_run(&mut self, handle: ZoneRunHandle) -> impl Future<Output = Result<bool>> {
|
||||
self.addr.send(CancelRun(handle)).map_err(From::from)
|
||||
}
|
||||
|
||||
pub fn cancel_by_zone(&mut self, zone_id: ZoneId) -> impl Future<Output = Result<usize>> {
|
||||
self.addr.send(CancelByZone(zone_id)).map_err(From::from)
|
||||
}
|
||||
|
||||
pub fn cancel_all(&mut self) -> impl Future<Output = Result<usize>> {
|
||||
self.addr.send(CancelAll).map_err(From::from)
|
||||
}
|
||||
|
||||
pub fn pause(&mut self) -> impl Future<Output = Result<()>> {
|
||||
self.addr.send(SetPaused(true)).map_err(From::from)
|
||||
}
|
||||
|
||||
pub fn unpause(&mut self) -> impl Future<Output = Result<()>> {
|
||||
self.addr.send(SetPaused(false)).map_err(From::from)
|
||||
}
|
||||
|
||||
pub fn subscribe(&mut self) -> impl Future<Output = Result<ZoneEventRecv>> {
|
||||
self.addr.send(Subscribe).map_err(From::from)
|
||||
}
|
||||
|
||||
pub fn get_state_recv(&self) -> ZoneRunnerStateRecv {
|
||||
self.state_recv.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::trace_listeners::{EventListener, Filters};
|
||||
use sprinklers_core::{
|
||||
model::{Zone, Zones},
|
||||
zone_interface::MockZoneInterface,
|
||||
};
|
||||
|
||||
use assert_matches::assert_matches;
|
||||
use im::ordmap;
|
||||
use tracing_subscriber::prelude::*;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_quit() {
|
||||
let quit_msg = EventListener::new(
|
||||
Filters::new()
|
||||
.target("sprinklers_actors::zone_runner")
|
||||
.message("zone_runner stopped"),
|
||||
);
|
||||
let subscriber = tracing_subscriber::registry().with(quit_msg.clone());
|
||||
let _sub = tracing::subscriber::set_default(subscriber);
|
||||
|
||||
let interface = MockZoneInterface::new(6);
|
||||
let mut runner = ZoneRunner::new(Arc::new(interface));
|
||||
tokio::task::yield_now().await;
|
||||
runner.quit().await.unwrap();
|
||||
|
||||
assert_eq!(quit_msg.get_count(), 1);
|
||||
}
|
||||
|
||||
fn make_zones_and_interface() -> (Zones, Arc<MockZoneInterface>) {
|
||||
let interface = Arc::new(MockZoneInterface::new(2));
|
||||
let zones: Zones = ordmap![
|
||||
1 => Zone {
|
||||
id: 1,
|
||||
name: "Zone 1".into(),
|
||||
interface_id: 0,
|
||||
}.into(),
|
||||
2 => Zone {
|
||||
id: 2,
|
||||
name: "Zone 2".into(),
|
||||
interface_id: 1,
|
||||
}.into()
|
||||
];
|
||||
(zones, interface)
|
||||
}
|
||||
|
||||
fn assert_zone_states(interface: &MockZoneInterface, states: &[bool]) {
|
||||
for (id, state) in states.iter().enumerate() {
|
||||
assert_eq!(
|
||||
interface.get_zone_state(id as u32),
|
||||
*state,
|
||||
"zone interface id {} did not match",
|
||||
id
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn advance(dur: Duration) {
|
||||
// HACK: advance should really be enough, but we need another yield_now
|
||||
tokio::time::pause();
|
||||
tokio::time::advance(dur).await;
|
||||
tokio::task::yield_now().await;
|
||||
tokio::time::resume();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_queue() {
|
||||
let (zones, interface) = make_zones_and_interface();
|
||||
let mut runner = ZoneRunner::new(interface.clone());
|
||||
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
// Queue single zone, make sure it runs
|
||||
runner
|
||||
.queue_run(zones[&1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_zone_states(&interface, &[true, false]);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
// Queue two zones, make sure they run one at a time
|
||||
runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
runner
|
||||
.queue_run(zones[&1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
|
||||
assert_zone_states(&interface, &[true, false]);
|
||||
|
||||
advance(Duration::from_secs(10)).await;
|
||||
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_cancel_run() {
|
||||
let (zones, interface) = make_zones_and_interface();
|
||||
let mut runner = ZoneRunner::new(interface.clone());
|
||||
|
||||
let run1 = runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let _run2 = runner
|
||||
.queue_run(zones[&1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let run3 = runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
runner.cancel_run(run1).await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_zone_states(&interface, &[true, false]);
|
||||
|
||||
runner.cancel_run(run3).await.unwrap();
|
||||
advance(Duration::from_secs(11)).await;
|
||||
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_cancel_all() {
|
||||
let (zones, interface) = make_zones_and_interface();
|
||||
let mut runner = ZoneRunner::new(interface.clone());
|
||||
|
||||
runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
runner
|
||||
.queue_run(zones[&1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
runner.cancel_all().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.cancel_all().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_pause() {
|
||||
let (zones, interface) = make_zones_and_interface();
|
||||
let mut runner = ZoneRunner::new(interface.clone());
|
||||
|
||||
let _run1 = runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let run2 = runner
|
||||
.queue_run(zones[&1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let _run3 = runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
runner.pause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
advance(Duration::from_secs(10)).await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.unpause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(8)).await;
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(2)).await;
|
||||
assert_zone_states(&interface, &[true, false]);
|
||||
|
||||
runner.pause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
// cancel paused run
|
||||
runner.cancel_run(run2).await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.unpause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_zone_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
assert_zone_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_event() {
|
||||
let (zones, interface) = make_zones_and_interface();
|
||||
let mut runner = ZoneRunner::new(interface.clone());
|
||||
|
||||
let mut event_recv = runner.subscribe().await.unwrap();
|
||||
|
||||
let run1 = runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let run2 = runner
|
||||
.queue_run(zones[&1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let run3 = runner
|
||||
.queue_run(zones[&2].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_matches!(
|
||||
event_recv.recv().await,
|
||||
Ok(ZoneEvent::RunStart(handle, _))
|
||||
if handle == run1
|
||||
);
|
||||
|
||||
runner.pause().await.unwrap();
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunnerPause));
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunPause(handle, _)) if handle == run1);
|
||||
|
||||
runner.unpause().await.unwrap();
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunnerUnpause));
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunUnpause(handle, _)) if handle == run1);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunFinish(handle, _)) if handle == run1);
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunStart(handle, _)) if handle == run2);
|
||||
|
||||
runner.pause().await.unwrap();
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunnerPause));
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunPause(handle, _)) if handle == run2);
|
||||
|
||||
// cancel paused run
|
||||
runner.cancel_run(run2.clone()).await.unwrap();
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunCancel(handle, _)) if handle == run2);
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunPause(handle, _)) if handle == run3);
|
||||
|
||||
runner.unpause().await.unwrap();
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunnerUnpause));
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunUnpause(handle, _)) if handle == run3);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
assert_matches!(event_recv.recv().await, Ok(ZoneEvent::RunFinish(handle, _)) if handle == run3);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
}
|
||||
}
|
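Taken together, the deleted zone_runner module exposes ZoneRunner as a small async facade over the actor. A rough usage sketch follows; it is not part of the diff, and it assumes an actix runtime is running (as in the #[actix_rt::test] functions above) with the ZoneRunner, ZoneEvent and Result items from this module in scope.

use sprinklers_core::zone_interface::MockZoneInterface;
use std::{sync::Arc, time::Duration};

async fn zone_runner_sketch(zones: sprinklers_core::model::Zones) -> Result<()> {
    let mut runner = ZoneRunner::new(Arc::new(MockZoneInterface::new(2)));
    let mut events = runner.subscribe().await?;

    // Queue a 10 second run for zone 1 and watch for its start event.
    let handle = runner
        .queue_run(zones[&1].clone(), Duration::from_secs(10))
        .await?;
    if let Ok(ZoneEvent::RunStart(started, zone)) = events.recv().await {
        tracing::info!(zone_id = zone.id, "run {:?} started", started);
    }

    // Cancel it again and shut the actor down.
    runner.cancel_run(handle).await?;
    runner.quit().await?;
    Ok(())
}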
@@ -1,16 +0,0 @@
[package]
name = "sprinklers_core"
version = "0.1.0"
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
chrono = { version = "0.4.15" }
serde = { version = "1.0.116", features = ["derive"] }
im = "15.0.0"
tracing = { version = "0.1.19" }

[dev-dependencies]
serde_json = "1.0.57"
@@ -1,4 +0,0 @@
pub mod model;
pub mod schedule;
pub mod serde;
pub mod zone_interface;
@@ -1,7 +0,0 @@
//! Domain specific data models

mod program;
mod zone;

pub use program::*;
pub use zone::*;
@@ -1,41 +0,0 @@
use super::zone::ZoneId;
use crate::schedule::Schedule;
use serde::{Deserialize, Serialize};
use std::{sync::Arc, time::Duration};

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProgramItem {
    // TODO: update nomenclature
    #[serde(rename = "sectionId")]
    pub zone_id: ZoneId,
    #[serde(with = "crate::serde::duration_secs")]
    pub duration: Duration,
}

pub type ProgramSequence = Vec<ProgramItem>;

pub type ProgramId = u32;

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Program {
    pub id: ProgramId,
    pub name: String,
    pub sequence: ProgramSequence,
    pub enabled: bool,
    pub schedule: Schedule,
}

pub type ProgramRef = Arc<Program>;

pub type Programs = im::OrdMap<ProgramId, ProgramRef>;

#[derive(Default, Debug, Serialize, Deserialize)]
#[serde(default, rename_all = "camelCase")]
pub struct ProgramUpdateData {
    pub name: Option<String>,
    pub sequence: Option<ProgramSequence>,
    pub enabled: Option<bool>,
    pub schedule: Option<Schedule>,
}
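The serde attributes above fix the wire format: zone_id travels as "sectionId" and duration as a floating-point number of seconds via crate::serde::duration_secs. A small illustration, not part of the diff, written as a test so it can lean on the crate's serde_json dev-dependency:

#[cfg(test)]
mod wire_format_example {
    use super::*;
    use std::time::Duration;

    #[test]
    fn program_item_wire_format() {
        // "sectionId" maps onto zone_id, and 120.5 becomes a Duration.
        let item: ProgramItem =
            serde_json::from_str(r#"{ "sectionId": 3, "duration": 120.5 }"#).unwrap();
        assert_eq!(item.zone_id, 3);
        assert_eq!(item.duration, Duration::from_secs_f64(120.5));
    }
}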
@@ -1,27 +0,0 @@
//! Data models for sprinklers zones
//!
//! A zone represents a group of sprinkler heads actuated by a single
//! valve. Physically controllable (or virtual) valves are handled by implementations of
//! [ZoneInterface](../../zone_interface/trait.ZoneInterface.html), but the model
//! describes a logical zone and how it maps to a physical one.

use crate::zone_interface::ZoneNum;
use serde::{Deserialize, Serialize};
use std::sync::Arc;

/// Identifying integer type for a Zone
pub type ZoneId = u32;

/// A single logical zone
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Zone {
    pub id: ZoneId,
    pub name: String,
    /// ID number of the corresponding physical zone
    pub interface_id: ZoneNum,
}

pub type ZoneRef = Arc<Zone>;

pub type Zones = im::OrdMap<ZoneId, ZoneRef>;
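As with Program, the camelCase rename means interface_id appears as "interfaceId" on the wire. For illustration only (not part of the diff, again using the serde_json dev-dependency):

#[cfg(test)]
mod zone_wire_format_example {
    use super::*;

    #[test]
    fn zone_serializes_camel_case() {
        let zone = Zone {
            id: 1,
            name: "Front yard".into(),
            interface_id: 0,
        };
        // serde_json keeps declaration order; only the field names are renamed.
        assert_eq!(
            serde_json::to_string(&zone).unwrap(),
            r#"{"id":1,"name":"Front yard","interfaceId":0}"#
        );
    }
}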
@@ -1,645 +0,0 @@
|
||||
//! Scheduling for events to run at certain intervals in the future
|
||||
|
||||
use chrono::{Date, DateTime, Datelike, Local, NaiveDateTime, NaiveTime, TimeZone, Weekday};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::cmp;
|
||||
use std::iter::FromIterator;
|
||||
|
||||
pub use chrono::Duration;
|
||||
|
||||
/// A set of times of day (for [Schedule](struct.Schedule.html))
|
||||
pub type TimeSet = Vec<NaiveTime>;
|
||||
/// A set of days of week (for [Schedule](struct.Schedule.html))
|
||||
pub type WeekdaySet = Vec<Weekday>;
|
||||
|
||||
/// Returns a [`WeekdaySet`](type.WeekdaySet.html) of every day of the week
|
||||
#[allow(dead_code)]
|
||||
pub fn every_day() -> WeekdaySet {
|
||||
WeekdaySet::from_iter(
|
||||
[
|
||||
Weekday::Mon,
|
||||
Weekday::Tue,
|
||||
Weekday::Wed,
|
||||
Weekday::Thu,
|
||||
Weekday::Fri,
|
||||
Weekday::Sat,
|
||||
Weekday::Sun,
|
||||
]
|
||||
.iter()
|
||||
.cloned(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Represents the different types of date-time bounds that can be on a schedule
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum DateTimeBound {
|
||||
/// There is no bound (ie. the Schedule extends with no limit)
|
||||
None,
|
||||
/// There is a bound that repeats every year (ie. the year is set to the current year)
|
||||
Yearly(NaiveDateTime),
|
||||
/// There is a definite bound on the schedule
|
||||
Definite(NaiveDateTime),
|
||||
}
|
||||
|
||||
impl Default for DateTimeBound {
|
||||
fn default() -> DateTimeBound {
|
||||
DateTimeBound::None
|
||||
}
|
||||
}
|
||||
|
||||
impl DateTimeBound {
|
||||
/// Resolves this bound into an optional `DateTime`. If there is no bound or the bound could
|
||||
/// not be resolved, None is returned.
|
||||
///
|
||||
/// `reference` is the reference that is used to resolve a `Yearly` bound.
|
||||
pub fn resolve_from<Tz: TimeZone>(&self, reference: &DateTime<Tz>) -> Option<DateTime<Tz>> {
|
||||
match *self {
|
||||
DateTimeBound::None => None,
|
||||
DateTimeBound::Yearly(date_time) => {
|
||||
date_time.with_year(reference.year()).and_then(|date_time| {
|
||||
reference
|
||||
.timezone()
|
||||
.from_local_datetime(&date_time)
|
||||
.single()
|
||||
})
|
||||
}
|
||||
DateTimeBound::Definite(date_time) => reference
|
||||
.timezone()
|
||||
.from_local_datetime(&date_time)
|
||||
.single(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A schedule that determines when an event will occur.
|
||||
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Schedule {
|
||||
#[serde(
|
||||
serialize_with = "ser::serialize_times",
|
||||
deserialize_with = "ser::deserialize_times"
|
||||
)]
|
||||
pub times: TimeSet,
|
||||
#[serde(
|
||||
serialize_with = "ser::serialize_weekdays",
|
||||
deserialize_with = "ser::deserialize_weekdays"
|
||||
)]
|
||||
pub weekdays: WeekdaySet,
|
||||
pub from: DateTimeBound,
|
||||
pub to: DateTimeBound,
|
||||
}
|
||||
|
||||
mod ser {
|
||||
use super::{DateTimeBound, TimeSet, WeekdaySet};
|
||||
use chrono::{NaiveDate, NaiveTime, Weekday};
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::{convert::TryInto, fmt};
|
||||
|
||||
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
|
||||
struct TimeOfDay {
|
||||
hour: u32,
|
||||
minute: u32,
|
||||
second: u32,
|
||||
#[serde(default)]
|
||||
millisecond: u32,
|
||||
}
|
||||
|
||||
impl From<&NaiveTime> for TimeOfDay {
|
||||
fn from(time: &NaiveTime) -> Self {
|
||||
use chrono::Timelike;
|
||||
Self {
|
||||
hour: time.hour(),
|
||||
minute: time.minute(),
|
||||
second: time.second(),
|
||||
millisecond: time.nanosecond() / 1_000_000_u32,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct InvalidTimeOfDay;
|
||||
|
||||
impl fmt::Display for InvalidTimeOfDay {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"invalid time of day".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryInto<NaiveTime> for TimeOfDay {
|
||||
type Error = InvalidTimeOfDay;
|
||||
fn try_into(self) -> Result<NaiveTime, Self::Error> {
|
||||
NaiveTime::from_hms_milli_opt(self.hour, self.minute, self.second, self.millisecond)
|
||||
.ok_or(InvalidTimeOfDay)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
pub fn serialize_times<S>(times: &TimeSet, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.collect_seq(times.iter().map(TimeOfDay::from))
|
||||
}
|
||||
|
||||
pub fn deserialize_times<'de, D>(deserializer: D) -> Result<TimeSet, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
use serde::de::{Error, SeqAccess, Visitor};
|
||||
|
||||
struct TimeSetVisitor;
|
||||
|
||||
impl<'de> Visitor<'de> for TimeSetVisitor {
|
||||
type Value = TimeSet;
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("a sequence of time of days")
|
||||
}
|
||||
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
|
||||
where
|
||||
A: SeqAccess<'de>,
|
||||
{
|
||||
let mut times = TimeSet::with_capacity(seq.size_hint().unwrap_or(0));
|
||||
while let Some(value) = seq.next_element::<TimeOfDay>()? {
|
||||
let time: NaiveTime = value.try_into().map_err(A::Error::custom)?;
|
||||
times.push(time);
|
||||
}
|
||||
Ok(times)
|
||||
}
|
||||
}
|
||||
|
||||
deserializer.deserialize_seq(TimeSetVisitor)
|
||||
}
|
||||
|
||||
#[allow(clippy::ptr_arg)]
|
||||
pub fn serialize_weekdays<S>(weekdays: &WeekdaySet, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let iter = weekdays
|
||||
.iter()
|
||||
.map(|weekday| weekday.num_days_from_sunday());
|
||||
serializer.collect_seq(iter)
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct InvalidWeekday;
|
||||
|
||||
impl fmt::Display for InvalidWeekday {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"weekday out of range 0 to 6".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
fn weekday_from_days_from_sunday(days: u32) -> Result<Weekday, InvalidWeekday> {
|
||||
Ok(match days {
|
||||
0 => Weekday::Sun,
|
||||
1 => Weekday::Mon,
|
||||
2 => Weekday::Tue,
|
||||
3 => Weekday::Wed,
|
||||
4 => Weekday::Thu,
|
||||
5 => Weekday::Fri,
|
||||
6 => Weekday::Sat,
|
||||
_ => return Err(InvalidWeekday),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn deserialize_weekdays<'de, D>(deserializer: D) -> Result<WeekdaySet, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
use serde::de::{Error, SeqAccess, Visitor};
|
||||
|
||||
struct WeekdaySetVisitor;
|
||||
|
||||
impl<'de> Visitor<'de> for WeekdaySetVisitor {
|
||||
type Value = WeekdaySet;
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("a sequence of integers representing weekdays")
|
||||
}
|
||||
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
|
||||
where
|
||||
A: SeqAccess<'de>,
|
||||
{
|
||||
let mut weekdays = WeekdaySet::with_capacity(seq.size_hint().unwrap_or(0));
|
||||
while let Some(value) = seq.next_element::<u32>()? {
|
||||
let weekday = weekday_from_days_from_sunday(value).map_err(A::Error::custom)?;
|
||||
weekdays.push(weekday);
|
||||
}
|
||||
Ok(weekdays)
|
||||
}
|
||||
}
|
||||
|
||||
deserializer.deserialize_seq(WeekdaySetVisitor)
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
struct DateAndYear {
|
||||
year: i32,
|
||||
month: u32,
|
||||
day: u32,
|
||||
}
|
||||
|
||||
impl From<NaiveDate> for DateAndYear {
|
||||
fn from(date: NaiveDate) -> Self {
|
||||
use chrono::Datelike;
|
||||
Self {
|
||||
year: date.year(),
|
||||
month: date.month(),
|
||||
day: date.day(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
struct InvalidDateAndYear;
|
||||
|
||||
impl fmt::Display for InvalidDateAndYear {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
"invalid date or year".fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryInto<NaiveDate> for DateAndYear {
|
||||
type Error = InvalidDateAndYear;
|
||||
fn try_into(self) -> Result<NaiveDate, Self::Error> {
|
||||
NaiveDate::from_ymd_opt(self.year, self.month, self.day).ok_or(InvalidDateAndYear)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for DateTimeBound {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match self {
|
||||
DateTimeBound::None => serializer.serialize_none(),
|
||||
DateTimeBound::Yearly(date_time) => {
|
||||
// Discard time
|
||||
let mut date_of_year: DateAndYear = date_time.date().into();
|
||||
// Set year to 0 (since it is yearly)
|
||||
date_of_year.year = 0;
|
||||
|
||||
date_of_year.serialize(serializer)
|
||||
}
|
||||
DateTimeBound::Definite(date_time) => {
|
||||
// Discard time
|
||||
let date_of_year: DateAndYear = date_time.date().into();
|
||||
|
||||
date_of_year.serialize(serializer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for DateTimeBound {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
use serde::de::Error;
|
||||
let date_of_year: Option<DateAndYear> = Deserialize::deserialize(deserializer)?;
|
||||
Ok(match date_of_year {
|
||||
Some(date_of_year) => {
|
||||
let year = date_of_year.year;
|
||||
let date: NaiveDate = date_of_year.try_into().map_err(D::Error::custom)?;
|
||||
let date_time = date.and_hms(0, 0, 0);
|
||||
if year == 0 {
|
||||
DateTimeBound::Yearly(date_time)
|
||||
} else {
|
||||
DateTimeBound::Definite(date_time)
|
||||
}
|
||||
}
|
||||
None => DateTimeBound::None,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the next date matching the `weekday` after `date`
|
||||
fn next_weekday<Tz: TimeZone>(mut date: Date<Tz>, weekday: Weekday) -> Date<Tz> {
|
||||
while date.weekday() != weekday {
|
||||
date = date.succ();
|
||||
}
|
||||
date
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl Schedule {
|
||||
/// Creates a new Schedule.
|
||||
///
|
||||
/// `times` is the times of day the event will be run. `weekdays` is the set of days of week
|
||||
/// the event will be run. `from` and `to` are restrictions on the end and beginning of event
|
||||
/// runs, respectively.
|
||||
pub fn new<T, W>(times: T, weekdays: W, from: DateTimeBound, to: DateTimeBound) -> Schedule
|
||||
where
|
||||
T: IntoIterator<Item = NaiveTime>,
|
||||
W: IntoIterator<Item = Weekday>,
|
||||
{
|
||||
Schedule {
|
||||
times: TimeSet::from_iter(times),
|
||||
weekdays: WeekdaySet::from_iter(weekdays),
|
||||
from,
|
||||
to,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the next `DateTime` the event should run after `reference`
|
||||
///
|
||||
/// Returns `None` if the event will never run after `reference` (ie. must be a `from` bound)
|
||||
pub fn next_run_after<Tz: TimeZone>(&self, reference: &DateTime<Tz>) -> Option<DateTime<Tz>> {
|
||||
let mut to = self.to.resolve_from(reference);
|
||||
let mut from = self.from.resolve_from(reference);
|
||||
if let (Some(from), Some(to)) = (&mut from, &mut to) {
|
||||
// We must handle the case where yearly bounds cross a year boundary
|
||||
if to < from {
|
||||
if reference < to {
|
||||
// Still in the bounds overlapping the previous year boundary
|
||||
*from = from.with_year(from.year() - 1).unwrap();
|
||||
} else {
|
||||
// Awaiting (or in) next years bounds
|
||||
*to = to.with_year(to.year() + 1).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
let from = match (from, &to) {
|
||||
(Some(from), Some(to)) if &from > to => from.with_year(from.year() + 1),
|
||||
(from, _) => from,
|
||||
};
|
||||
let reference = match &from {
|
||||
Some(from) if from > reference => from,
|
||||
_ => reference,
|
||||
}
|
||||
.clone();
|
||||
let mut next_run: Option<DateTime<Tz>> = None;
|
||||
for weekday in &self.weekdays {
|
||||
for time in &self.times {
|
||||
let candidate = next_weekday(reference.date(), *weekday)
|
||||
.and_time(*time)
|
||||
.map(|date| {
|
||||
if date < reference {
|
||||
date + Duration::weeks(1)
|
||||
} else {
|
||||
date
|
||||
}
|
||||
});
|
||||
let candidate = match (candidate, &to) {
|
||||
(Some(date), Some(to)) if &date > to => None,
|
||||
(date, _) => date,
|
||||
};
|
||||
next_run = match (next_run, candidate) {
|
||||
// return whichever is first if there are 2 candidates
|
||||
(Some(d1), Some(d2)) => Some(cmp::min(d1, d2)),
|
||||
// otherwise return whichever isn't None (or None if both are)
|
||||
(o1, o2) => o1.or(o2),
|
||||
}
|
||||
}
|
||||
}
|
||||
next_run
|
||||
}
|
||||
|
||||
/// Gets the next run after the current (local) time
|
||||
///
|
||||
/// See [next_run_after](#method.next_run_after)
|
||||
pub fn next_run_local(&self) -> Option<DateTime<Local>> {
|
||||
self.next_run_after(&Local::now())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use chrono::NaiveDate;
|
||||
|
||||
#[test]
|
||||
fn test_date_time_bound() {
|
||||
use super::DateTimeBound::*;
|
||||
|
||||
let cases: Vec<(DateTimeBound, Option<DateTime<Local>>)> = vec![
|
||||
(None, Option::None),
|
||||
(
|
||||
Definite(NaiveDate::from_ymd(2016, 11, 16).and_hms(10, 30, 0)),
|
||||
Some(Local.ymd(2016, 11, 16).and_hms(10, 30, 0)),
|
||||
),
|
||||
(
|
||||
Yearly(NaiveDate::from_ymd(2016, 11, 16).and_hms(10, 30, 0)),
|
||||
Some(Local.ymd(2018, 11, 16).and_hms(10, 30, 0)),
|
||||
),
|
||||
(
|
||||
Yearly(NaiveDate::from_ymd(2016, 1, 1).and_hms(0, 0, 0)),
|
||||
Some(Local.ymd(2018, 1, 1).and_hms(0, 0, 0)),
|
||||
),
|
||||
(
|
||||
Yearly(NaiveDate::from_ymd(2016, 12, 31).and_hms(23, 59, 59)),
|
||||
Some(Local.ymd(2018, 12, 31).and_hms(23, 59, 59)),
|
||||
),
|
||||
(
|
||||
Yearly(NaiveDate::from_ymd(2012, 2, 29).and_hms(0, 0, 0)),
|
||||
Option::None,
|
||||
), /* leap day */
|
||||
];
|
||||
let from = Local.ymd(2018, 1, 1).and_hms(0, 0, 0);
|
||||
|
||||
for (bound, expected_result) in cases {
|
||||
let result = bound.resolve_from(&from);
|
||||
assert_eq!(result, expected_result);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_weekday() {
|
||||
use super::next_weekday;
|
||||
use chrono::Weekday;
|
||||
// (date, weekday, result)
|
||||
let cases: Vec<(Date<Local>, Weekday, Date<Local>)> = vec![
|
||||
(
|
||||
Local.ymd(2016, 11, 16),
|
||||
Weekday::Wed,
|
||||
Local.ymd(2016, 11, 16),
|
||||
),
|
||||
(
|
||||
Local.ymd(2016, 11, 16),
|
||||
Weekday::Fri,
|
||||
Local.ymd(2016, 11, 18),
|
||||
),
|
||||
(
|
||||
Local.ymd(2016, 11, 16),
|
||||
Weekday::Tue,
|
||||
Local.ymd(2016, 11, 22),
|
||||
),
|
||||
(Local.ymd(2016, 12, 30), Weekday::Tue, Local.ymd(2017, 1, 3)),
|
||||
(
|
||||
Local.ymd(2016, 11, 16),
|
||||
Weekday::Tue,
|
||||
Local.ymd(2016, 11, 22),
|
||||
),
|
||||
];
|
||||
|
||||
for (date, weekday, expected_result) in cases {
|
||||
let result = next_weekday(date, weekday);
|
||||
assert_eq!(result, expected_result);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_run_after() {
|
||||
use super::{DateTimeBound, Schedule};
|
||||
use chrono::{DateTime, Local, NaiveTime, TimeZone, Weekday};
|
||||
let schedule = Schedule::new(
|
||||
vec![NaiveTime::from_hms(10, 30, 0)],
|
||||
vec![Weekday::Wed],
|
||||
DateTimeBound::None,
|
||||
DateTimeBound::None,
|
||||
);
|
||||
let cases: Vec<(DateTime<Local>, Option<DateTime<Local>>)> = vec![
|
||||
(
|
||||
Local.ymd(2016, 11, 14).and_hms(10, 30, 0),
|
||||
Some(Local.ymd(2016, 11, 16).and_hms(10, 30, 0)),
|
||||
),
|
||||
(
|
||||
Local.ymd(2016, 11, 16).and_hms(10, 20, 0),
|
||||
Some(Local.ymd(2016, 11, 16).and_hms(10, 30, 0)),
|
||||
),
|
||||
(
|
||||
Local.ymd(2016, 11, 16).and_hms(10, 40, 0),
|
||||
Some(Local.ymd(2016, 11, 23).and_hms(10, 30, 0)),
|
||||
),
|
||||
];
|
||||
for (reference, expected_result) in cases {
|
||||
let result = schedule.next_run_after(&reference);
|
||||
assert_eq!(result, expected_result);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_run_after2() {
|
||||
use super::{DateTimeBound, Schedule};
|
||||
use chrono::{DateTime, Local, NaiveTime, TimeZone, Weekday};
|
||||
#[derive(Debug)]
|
||||
struct Case {
|
||||
schedule: Schedule,
|
||||
ref_time: DateTime<Local>,
|
||||
expected_result: Option<DateTime<Local>>,
|
||||
}
|
||||
impl Case {
|
||||
fn new(
|
||||
schedule: Schedule,
|
||||
ref_time: DateTime<Local>,
|
||||
expected_result: Option<DateTime<Local>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
schedule,
|
||||
ref_time,
|
||||
expected_result,
|
||||
}
|
||||
}
|
||||
}
|
||||
let sched1 = Schedule::new(
|
||||
vec![NaiveTime::from_hms(8, 30, 0), NaiveTime::from_hms(20, 0, 0)],
|
||||
vec![Weekday::Thu, Weekday::Fri],
|
||||
DateTimeBound::None,
|
||||
DateTimeBound::None,
|
||||
);
|
||||
let sched2 = Schedule::new(
|
||||
vec![NaiveTime::from_hms(8, 30, 0), NaiveTime::from_hms(20, 0, 0)],
|
||||
vec![Weekday::Thu, Weekday::Fri],
|
||||
DateTimeBound::Definite(NaiveDate::from_ymd(2016, 5, 30).and_hms(0, 0, 0)),
|
||||
DateTimeBound::Definite(NaiveDate::from_ymd(2016, 6, 30).and_hms(0, 0, 0)),
|
||||
);
|
||||
let sched3 = Schedule::new(
|
||||
vec![NaiveTime::from_hms(8, 30, 0), NaiveTime::from_hms(20, 0, 0)],
|
||||
every_day(),
|
||||
DateTimeBound::Yearly(NaiveDate::from_ymd(0, 12, 15).and_hms(0, 0, 0)),
|
||||
DateTimeBound::Yearly(NaiveDate::from_ymd(0, 1, 15).and_hms(0, 0, 0)),
|
||||
);
|
||||
let cases: Vec<Case> = vec![
|
||||
Case::new(
|
||||
sched1.clone(),
|
||||
Local.ymd(2016, 5, 16).and_hms(0, 0, 0),
|
||||
Some(Local.ymd(2016, 5, 19).and_hms(8, 30, 0)),
|
||||
),
|
||||
Case::new(
|
||||
sched1,
|
||||
Local.ymd(2016, 5, 20).and_hms(9, 0, 0),
|
||||
Some(Local.ymd(2016, 5, 20).and_hms(20, 0, 0)),
|
||||
),
|
||||
Case::new(
|
||||
sched2.clone(),
|
||||
Local.ymd(2016, 6, 1).and_hms(0, 0, 0),
|
||||
Some(Local.ymd(2016, 6, 2).and_hms(8, 30, 0)),
|
||||
),
|
||||
Case::new(
|
||||
sched2.clone(),
|
||||
Local.ymd(2016, 5, 1).and_hms(0, 0, 0),
|
||||
Some(Local.ymd(2016, 6, 2).and_hms(8, 30, 0)),
|
||||
),
|
||||
Case::new(sched2, Local.ymd(2016, 7, 1).and_hms(0, 0, 0), None),
|
||||
Case::new(
|
||||
sched3.clone(),
|
||||
Local.ymd(2016, 11, 1).and_hms(0, 0, 0),
|
||||
Some(Local.ymd(2016, 12, 15).and_hms(8, 30, 0)),
|
||||
),
|
||||
Case::new(
|
||||
sched3.clone(),
|
||||
Local.ymd(2017, 1, 1).and_hms(9, 0, 0),
|
||||
Some(Local.ymd(2017, 1, 1).and_hms(20, 0, 0)),
|
||||
),
|
||||
Case::new(
|
||||
sched3,
|
||||
Local.ymd(2016, 1, 30).and_hms(0, 0, 0),
|
||||
Some(Local.ymd(2016, 12, 15).and_hms(8, 30, 0)),
|
||||
),
|
||||
];
|
||||
for case in cases {
|
||||
let result = case.schedule.next_run_after(&case.ref_time);
|
||||
assert_eq!(result, case.expected_result, "case failed: {:?}", case);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize() {
|
||||
let sched = Schedule::new(
|
||||
vec![
|
||||
NaiveTime::from_hms_milli(0, 0, 0, 0),
|
||||
NaiveTime::from_hms_milli(23, 59, 59, 999),
|
||||
],
|
||||
every_day(),
|
||||
DateTimeBound::Yearly(NaiveDate::from_ymd(2020, 1, 1).and_hms(0, 0, 0)),
|
||||
DateTimeBound::Definite(NaiveDate::from_ymd(9999, 12, 31).and_hms(23, 59, 59)),
|
||||
);
|
||||
let ser = serde_json::to_string(&sched).unwrap();
|
||||
// Weekdays should match the order in `every_day()` but with sunday being 0
|
||||
assert_eq!(
|
||||
&ser,
|
||||
"{\
|
||||
\"times\":[\
|
||||
{\"hour\":0,\"minute\":0,\"second\":0,\"millisecond\":0},\
|
||||
{\"hour\":23,\"minute\":59,\"second\":59,\"millisecond\":999}\
|
||||
],\
|
||||
\"weekdays\":[1,2,3,4,5,6,0],\
|
||||
\"from\":{\
|
||||
\"year\":0,\"month\":1,\"day\":1\
|
||||
},\
|
||||
\"to\":{\
|
||||
\"year\":9999,\"month\":12,\"day\":31\
|
||||
}\
|
||||
}"
|
||||
);
|
||||
let sched_de: Schedule = serde_json::from_str(&ser).unwrap();
|
||||
assert_eq!(sched.times, sched_de.times);
|
||||
assert_eq!(sched.weekdays, sched_de.weekdays);
|
||||
// This serialization is lossy (year is discarded for yearly)
|
||||
// assert_eq!(sched_de.from, sched.from);
|
||||
assert_eq!(
|
||||
sched_de.from,
|
||||
DateTimeBound::Yearly(NaiveDate::from_ymd(0, 1, 1).and_hms(0, 0, 0))
|
||||
);
|
||||
// This serialization is also lossy (time is discarded)
|
||||
// assert_eq!(sched_de.to, sched.to);
|
||||
assert_eq!(
|
||||
sched_de.to,
|
||||
DateTimeBound::Definite(NaiveDate::from_ymd(9999, 12, 31).and_hms(0, 0, 0))
|
||||
);
|
||||
}
|
||||
}
|
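For reference, a sketch of how the schedule API above is meant to be called; this is not from the diff and assumes Schedule and DateTimeBound are in scope alongside the chrono types.

use chrono::{NaiveTime, Weekday};

fn schedule_sketch() {
    // Water at 6:00 AM on Mondays and Wednesdays, with no date bounds.
    let schedule = Schedule::new(
        vec![NaiveTime::from_hms(6, 0, 0)],
        vec![Weekday::Mon, Weekday::Wed],
        DateTimeBound::None,
        DateTimeBound::None,
    );
    if let Some(next_run) = schedule.next_run_local() {
        println!("next run at {}", next_run);
    }
}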
@@ -1,21 +0,0 @@
use serde::{Deserialize, Deserializer, Serialize, Serializer};

pub mod duration_secs {
    use super::*;
    use std::time::Duration;

    pub fn serialize<S>(duration: &Duration, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        duration.as_secs_f64().serialize(serializer)
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Duration, D::Error>
    where
        D: Deserializer<'de>,
    {
        let secs: f64 = Deserialize::deserialize(deserializer)?;
        Ok(Duration::from_secs_f64(secs))
    }
}
@ -1,65 +0,0 @@
use std::iter::repeat_with;
use std::sync::atomic::{AtomicBool, Ordering};
use tracing::debug;

pub type ZoneNum = u32;

pub trait ZoneInterface: Send + Sync {
    fn num_zones(&self) -> ZoneNum;
    fn set_zone_state(&self, id: ZoneNum, running: bool);
    fn get_zone_state(&self, id: ZoneNum) -> bool;
}

pub struct MockZoneInterface {
    states: Vec<AtomicBool>,
}

impl MockZoneInterface {
    #[allow(dead_code)]
    pub fn new(num_zones: ZoneNum) -> Self {
        Self {
            states: repeat_with(|| AtomicBool::new(false))
                .take(num_zones as usize)
                .collect(),
        }
    }
}

impl ZoneInterface for MockZoneInterface {
    fn num_zones(&self) -> ZoneNum {
        self.states.len() as ZoneNum
    }
    fn set_zone_state(&self, id: ZoneNum, running: bool) {
        debug!(id, running, "setting zone");
        self.states[id as usize].store(running, Ordering::SeqCst);
    }
    fn get_zone_state(&self, id: ZoneNum) -> bool {
        self.states[id as usize].load(Ordering::SeqCst)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_mock_zone_interface() {
        let iface = MockZoneInterface::new(6);
        assert_eq!(iface.num_zones(), 6);
        for i in 0..6u32 {
            assert_eq!(iface.get_zone_state(i), false);
        }
        for i in 0..6u32 {
            iface.set_zone_state(i, true);
        }
        for i in 0..6u32 {
            assert_eq!(iface.get_zone_state(i), true);
        }
        for i in 0..6u32 {
            iface.set_zone_state(i, false);
        }
        for i in 0..6u32 {
            assert_eq!(iface.get_zone_state(i), false);
        }
    }
}
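
A rough sketch of how callers drive the trait above, with MockZoneInterface standing in for real hardware; the helper function is hypothetical and assumes the module is exported as `sprinklers_core::zone_interface`:

use sprinklers_core::zone_interface::{MockZoneInterface, ZoneInterface};

// Hypothetical helper: switch every zone off through any ZoneInterface.
fn all_zones_off(iface: &dyn ZoneInterface) {
    for id in 0..iface.num_zones() {
        iface.set_zone_state(id, false);
        debug_assert!(!iface.get_zone_state(id));
    }
}

fn main() {
    let iface = MockZoneInterface::new(6);
    all_zones_off(&iface);
}
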
@ -1,19 +0,0 @@
[package]
name = "sprinklers_database"
version = "0.1.0"
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
bundled = ["rusqlite/bundled"]

[dependencies]
sprinklers_core = { path = "../sprinklers_core" }
rusqlite = "0.24.0"
eyre = "0.6.0"
serde = { version = "1.0.116" }
serde_json = "1.0.57"
thiserror = "1.0.20"
tracing = { version = "0.1.19" }
@ -1,65 +0,0 @@
#!/usr/bin/env bash

shopt -s nullglob extglob
set -e

usage() {
    echo "Usage: $0 <migration_name>"
}

SCRIPT=$(realpath "$0")
SCRIPTPATH=$(dirname "$SCRIPT")

PROJECT_ROOT=$(realpath --relative-to="$PWD" "$SCRIPTPATH/..")

MIGRATION_NAME="$1"
if [[ -z "$MIGRATION_NAME" ]]; then
    usage
    exit 1
fi

MIGRATIONS_DIR="$PROJECT_ROOT/sprinklers_database/src/migrations"

# echo "MIGRATIONS_DIR: $MIGRATIONS_DIR"

pushd "$MIGRATIONS_DIR" >/dev/null
SQL_FILES=(+([0-9])*.sql)
popd >/dev/null

# echo "SQL_FILES: "
# echo "${SQL_FILES[@]}"

# Remove anything after first numbers
MIGRATION_NUMBERS=("${SQL_FILES[@]%%-*.sql}")

# echo "MIGRATION_NUMBERS: "
# echo "${MIGRATION_NUMBERS[@]}"

MIGRATION_MAX=0
for num in "${MIGRATION_NUMBERS[@]}"; do
    if (( num > MIGRATION_MAX ))
    then
        MIGRATION_MAX=$num
    fi
done

# echo "MIGRATION_MAX: $MIGRATION_MAX"

NEXT_MIGRATION=$(( MIGRATION_MAX + 1 ))
NEXT_MIGRATION_PREFIX=$( printf "%04d" "$NEXT_MIGRATION" )

# echo "NEXT_MIGRATION: $NEXT_MIGRATION"
# echo "NEXT_MIGRATION_PREFIX: $NEXT_MIGRATION_PREFIX"

for SUFFIX in "up" "down"; do
    migration_file="$MIGRATIONS_DIR/$NEXT_MIGRATION_PREFIX-$MIGRATION_NAME-$SUFFIX.sql"
    echo "Creating migration file $migration_file"
    touch "$migration_file"
done

LINE_TO_INSERT="\ \ \ \ migs.add(include_file_migration!($NEXT_MIGRATION, \"$NEXT_MIGRATION_PREFIX-$MIGRATION_NAME\"));"

MIGRATIONS_RS="$PROJECT_ROOT/sprinklers_database/src/migrations/mod.rs"
echo "Inserting line in $MIGRATIONS_RS"
sed -i "/INSERT MIGRATION ABOVE/i \
$LINE_TO_INSERT" "$MIGRATIONS_RS"
@ -1,43 +0,0 @@
mod migration;
mod migrations;
mod program;
mod sql_json;
mod zone;

pub use migration::*;
pub use migrations::create_migrations;
pub use program::*;

pub use rusqlite::Connection as DbConn;

use sprinklers_core::model::Zones;

use eyre::Result;
use rusqlite::NO_PARAMS;

pub fn setup_db() -> Result<DbConn> {
    // let conn = DbConn::open_in_memory()?;
    let mut conn = DbConn::open("test.db")?;

    // Go ahead and use write ahead log for better perf
    conn.execute_batch("PRAGMA journal_mode=WAL;")?;

    let migs = create_migrations();
    migs.apply(&mut conn)?;

    Ok(conn)
}

pub fn query_zones(conn: &DbConn) -> Result<Zones> {
    let mut statement = conn.prepare_cached(
        "SELECT s.id, s.name, s.interface_id \
         FROM sections AS s;",
    )?;
    let rows = statement.query_map(NO_PARAMS, zone::from_sql)?;
    let mut zones = Zones::new();
    for row in rows {
        let zone = row?;
        zones.insert(zone.id, zone.into());
    }
    Ok(zones)
}
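
A minimal sketch of how these helpers fit together from a consumer of the sprinklers_database crate (query_programs comes from the re-exported program module); error handling is deferred to eyre:

use eyre::Result;
use sprinklers_database::{query_programs, query_zones, setup_db};

fn main() -> Result<()> {
    // Opens test.db, enables WAL, and applies any pending migrations.
    let conn = setup_db()?;
    let zones = query_zones(&conn)?;
    let programs = query_programs(&conn)?;
    println!("loaded {} zones and {} programs", zones.len(), programs.len());
    Ok(())
}
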
@ -1 +0,0 @@
DROP TABLE sections;
@ -1,5 +0,0 @@
CREATE TABLE sections (
    id INTEGER PRIMARY KEY,
    name TEXT NOT NULL,
    interface_id INTEGER NOT NULL
);
@ -1 +0,0 @@
DELETE FROM sections;
@ -1,7 +0,0 @@
INSERT INTO sections (id, name, interface_id)
VALUES (1, 'Front Yard Middle', 0),
       (2, 'Front Yard Left', 1),
       (3, 'Front Yard Right', 2),
       (4, 'Back Yard Middle', 3),
       (5, 'Back Yard Sauna', 4),
       (6, 'Garden', 5);
@ -1,3 +0,0 @@
DROP INDEX program_sequence_items_idx1;
DROP TABLE program_sequence_items;
DROP TABLE programs;
@ -1,20 +0,0 @@
CREATE TABLE programs
(
    id INTEGER PRIMARY KEY,
    name TEXT NOT NULL,
    enabled BOOLEAN NOT NULL,
    schedule TEXT NOT NULL
);

CREATE TABLE program_sequence_items
(
    seq_num INTEGER NOT NULL,
    program_id INTEGER NOT NULL,
    section_id INTEGER NOT NULL,
    duration REAL NOT NULL,
    FOREIGN KEY (program_id) REFERENCES programs (id),
    FOREIGN KEY (section_id) REFERENCES sections (id)
);

CREATE UNIQUE INDEX program_sequence_items_idx1
    ON program_sequence_items (program_id, seq_num);
@ -1,2 +0,0 @@
DELETE FROM program_sequence_items;
DELETE FROM programs;
@ -1,14 +0,0 @@
INSERT INTO programs (id, name, enabled, schedule)
VALUES (1, 'Test Program', TRUE,
        json_object(
            'times', json('[{"hour": 16, "minute": 1, "second": 0}]'),
            'weekdays', json_array(0, 1, 2, 3, 4, 5, 6),
            'from', NULL,
            'to', NULL));

INSERT INTO program_sequence_items (seq_num, program_id, section_id, duration)
SELECT row_number() OVER (ORDER BY s.id) seq_num,
       (SELECT p.id FROM programs as p WHERE p.name = 'Test Program') program_id,
       s.id section_id,
       2.0 duration
FROM sections AS s;
@ -1 +0,0 @@
DROP VIEW program_sequences;
@ -1,6 +0,0 @@
CREATE VIEW program_sequences AS
SELECT psi.program_id program_id,
       json_group_array(json_object(
               'section_id', psi.section_id,
               'duration', psi.duration)) sequence
FROM program_sequence_items as psi;
@ -1,8 +0,0 @@
DROP VIEW program_sequences;

CREATE VIEW program_sequences AS
SELECT psi.program_id program_id,
       json_group_array(json_object(
               'section_id', psi.section_id,
               'duration', psi.duration)) sequence
FROM program_sequence_items as psi;
@ -1,15 +0,0 @@
DROP VIEW program_sequences;

CREATE VIEW program_sequences AS
WITH psi_sorted AS (
    SELECT psi.program_id program_id,
           json_object(
                   'sectionId', psi.section_id,
                   'duration', psi.duration)
               obj
    FROM program_sequence_items AS psi
    ORDER BY psi.program_id, psi.seq_num)
SELECT psi_sorted.program_id program_id,
       json_group_array(json(psi_sorted.obj)) sequence
FROM psi_sorted
GROUP BY psi_sorted.program_id;
@ -1,23 +0,0 @@
use super::migration::{Migrations, SimpleMigration};

macro_rules! include_file_migration {
    ($mig_version:expr, $mig_name:literal) => {
        SimpleMigration::new_box(
            $mig_version,
            include_str!(concat!($mig_name, "-up.sql")),
            include_str!(concat!($mig_name, "-down.sql")),
        )
    };
}

pub fn create_migrations() -> Migrations {
    let mut migs = Migrations::new();
    migs.add(include_file_migration!(1, "0001-table_sections"));
    migs.add(include_file_migration!(2, "0002-section_rows"));
    migs.add(include_file_migration!(3, "0003-table_programs"));
    migs.add(include_file_migration!(4, "0004-program_rows"));
    migs.add(include_file_migration!(5, "0005-view_program_sequence"));
    migs.add(include_file_migration!(6, "0006-fix_view_program_seq"));
    // INSERT MIGRATION ABOVE -- DO NOT EDIT THIS COMMENT
    migs
}
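
For clarity, each include_file_migration! call above embeds the matching pair of SQL files at compile time; the first entry expands to roughly:

// Roughly what `include_file_migration!(1, "0001-table_sections")` expands to:
migs.add(SimpleMigration::new_box(
    1,
    include_str!("0001-table_sections-up.sql"),
    include_str!("0001-table_sections-down.sql"),
));

The new-migration shell script appends the next such line just above the INSERT MIGRATION ABOVE comment.
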
@ -1,225 +0,0 @@
|
||||
use super::sql_json::SqlJson;
|
||||
use super::DbConn;
|
||||
use sprinklers_core::{
|
||||
model::{
|
||||
Program, ProgramId, ProgramItem, ProgramSequence, ProgramUpdateData, Programs, ZoneId,
|
||||
},
|
||||
schedule::Schedule,
|
||||
};
|
||||
|
||||
use eyre::Result;
|
||||
use rusqlite::{params, Row, ToSql, Transaction, NO_PARAMS};
|
||||
use thiserror::Error;
|
||||
|
||||
type SqlProgramSequence = SqlJson<ProgramSequence>;
|
||||
type SqlSchedule = SqlJson<Schedule>;
|
||||
|
||||
fn from_sql<'a>(row: &Row<'a>) -> rusqlite::Result<Program> {
|
||||
Ok(Program {
|
||||
id: row.get(0)?,
|
||||
name: row.get(1)?,
|
||||
enabled: row.get(2)?,
|
||||
schedule: row.get::<_, SqlSchedule>(3)?.into_inner(),
|
||||
sequence: row.get::<_, SqlProgramSequence>(4)?.into_inner(),
|
||||
})
|
||||
}
|
||||
|
||||
struct SqlProgramUpdate<'a> {
|
||||
id: ProgramId,
|
||||
name: Option<&'a String>,
|
||||
enabled: Option<bool>,
|
||||
schedule: Option<SqlJson<&'a Schedule>>,
|
||||
}
|
||||
|
||||
impl<'a> IntoIterator for &'a SqlProgramUpdate<'a> {
|
||||
type Item = &'a dyn ToSql;
|
||||
type IntoIter = std::vec::IntoIter<Self::Item>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
vec![
|
||||
&self.id as &dyn ToSql,
|
||||
&self.name,
|
||||
&self.enabled,
|
||||
&self.schedule,
|
||||
]
|
||||
.into_iter()
|
||||
}
|
||||
}
|
||||
|
||||
fn update_as_sql(id: ProgramId, program: &ProgramUpdateData) -> SqlProgramUpdate {
|
||||
SqlProgramUpdate {
|
||||
id,
|
||||
name: program.name.as_ref(),
|
||||
enabled: program.enabled,
|
||||
schedule: program.schedule.as_ref().map(SqlJson),
|
||||
}
|
||||
}
|
||||
|
||||
struct SqlProgramItem {
|
||||
program_id: ProgramId,
|
||||
seq_num: isize,
|
||||
zone_id: ZoneId,
|
||||
duration: f64,
|
||||
}
|
||||
|
||||
impl<'a> IntoIterator for &'a SqlProgramItem {
|
||||
type Item = &'a dyn ToSql;
|
||||
type IntoIter = std::vec::IntoIter<Self::Item>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
vec![
|
||||
&self.program_id as &dyn ToSql,
|
||||
&self.seq_num,
|
||||
&self.zone_id,
|
||||
&self.duration,
|
||||
]
|
||||
.into_iter()
|
||||
}
|
||||
}
|
||||
|
||||
fn item_as_sql(
|
||||
program_item: &ProgramItem,
|
||||
program_id: ProgramId,
|
||||
seq_num: usize,
|
||||
) -> SqlProgramItem {
|
||||
SqlProgramItem {
|
||||
program_id,
|
||||
seq_num: (seq_num + 1) as isize,
|
||||
zone_id: program_item.zone_id,
|
||||
duration: program_item.duration.as_secs_f64(),
|
||||
}
|
||||
}
|
||||
|
||||
fn sequence_as_sql<'a>(
|
||||
program_id: ProgramId,
|
||||
sequence: &'a [ProgramItem],
|
||||
) -> impl Iterator<Item = SqlProgramItem> + 'a {
|
||||
sequence
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(move |(seq_num, item)| item_as_sql(item, program_id, seq_num))
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[error("no such program id: {0}")]
|
||||
pub struct NoSuchProgram(pub ProgramId);
|
||||
|
||||
pub fn query_programs(conn: &DbConn) -> Result<Programs> {
|
||||
let query_sql = "\
|
||||
SELECT p.id, p.name, p.enabled, p.schedule, ps.sequence
|
||||
FROM programs AS p
|
||||
INNER JOIN program_sequences AS ps ON ps.program_id = p.id;";
|
||||
let mut statement = conn.prepare_cached(query_sql)?;
|
||||
let rows = statement.query_map(NO_PARAMS, from_sql)?;
|
||||
let mut programs = Programs::new();
|
||||
for row in rows {
|
||||
let program = row?;
|
||||
programs.insert(program.id, program.into());
|
||||
}
|
||||
Ok(programs)
|
||||
}
|
||||
|
||||
pub fn query_program_by_id(conn: &DbConn, id: ProgramId) -> Result<Program> {
|
||||
let query_sql = "\
|
||||
SELECT p.id, p.name, p.enabled, p.schedule, ps.sequence
|
||||
FROM programs AS p
|
||||
INNER JOIN program_sequences AS ps ON ps.program_id = p.id
|
||||
WHERE p.id = ?1;";
|
||||
let mut statement = conn.prepare_cached(query_sql)?;
|
||||
statement
|
||||
.query_row(params![id], from_sql)
|
||||
.map_err(|err| match err {
|
||||
rusqlite::Error::QueryReturnedNoRows => NoSuchProgram(id).into(),
|
||||
e => e.into(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn update_program(
|
||||
trans: &mut Transaction,
|
||||
id: ProgramId,
|
||||
prog: &ProgramUpdateData,
|
||||
) -> Result<()> {
|
||||
let save = trans.savepoint()?;
|
||||
let conn = &*save;
|
||||
let update_sql = "\
|
||||
UPDATE programs
|
||||
SET name = ifnull(?2, name),
|
||||
enabled = ifnull(?3, enabled),
|
||||
schedule = ifnull(?4, schedule)
|
||||
WHERE id = ?1;";
|
||||
let updated = conn
|
||||
.prepare_cached(update_sql)?
|
||||
.execute(&update_as_sql(id, prog))?;
|
||||
if updated == 0 {
|
||||
return Err(NoSuchProgram(id).into());
|
||||
}
|
||||
if let Some(sequence) = &prog.sequence {
|
||||
let clear_seq_sql = "\
|
||||
DELETE
|
||||
FROM program_sequence_items
|
||||
WHERE program_id = ?1;";
|
||||
conn.prepare_cached(clear_seq_sql)?.execute(params![id])?;
|
||||
let insert_seq_sql = "\
|
||||
INSERT INTO program_sequence_items (program_id, seq_num, section_id, duration)
|
||||
VALUES (?1, ?2, ?3, ?4);";
|
||||
let mut insert_seq = conn.prepare_cached(insert_seq_sql)?;
|
||||
for params in sequence_as_sql(id, sequence) {
|
||||
insert_seq.execute(¶ms)?;
|
||||
}
|
||||
drop(insert_seq);
|
||||
}
|
||||
save.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::create_migrations;
|
||||
use rusqlite::Connection;
|
||||
use sprinklers_core::schedule::Duration;
|
||||
use std::sync::Arc;
|
||||
use tracing::{debug, trace};
|
||||
|
||||
#[test]
|
||||
fn test_update_program() {
|
||||
let mut db_conn = Connection::open_in_memory().unwrap();
|
||||
|
||||
let migs = create_migrations();
|
||||
migs.apply(&mut db_conn).unwrap();
|
||||
|
||||
let mut programs = query_programs(&db_conn).unwrap();
|
||||
|
||||
// HACK: ideally ordmap would have iter_mut()
|
||||
let program_ids: Vec<_> = programs.keys().cloned().collect();
|
||||
let mut trans = db_conn.transaction().unwrap();
|
||||
for prog_id in program_ids {
|
||||
let prog = &mut programs[&prog_id];
|
||||
let prog = Arc::make_mut(prog);
|
||||
debug!(program = debug(&prog), "read program");
|
||||
for _ in 0..1000 {
|
||||
let mut schedule = prog.schedule.clone();
|
||||
if let Some(time) = schedule.times.get_mut(0) {
|
||||
*time += Duration::seconds(5);
|
||||
}
|
||||
let mut sequence = prog.sequence.clone();
|
||||
for item in &mut sequence {
|
||||
item.duration += std::time::Duration::from_secs(1);
|
||||
}
|
||||
let prog_update = ProgramUpdateData {
|
||||
schedule: Some(schedule),
|
||||
sequence: Some(sequence),
|
||||
..ProgramUpdateData::default()
|
||||
};
|
||||
trace!("about to update");
|
||||
update_program(&mut trans, prog.id, &prog_update).unwrap();
|
||||
trace!("updated, now querying");
|
||||
*prog = query_program_by_id(&*trans, prog.id).unwrap();
|
||||
trace!("updated program: {:?}", &prog);
|
||||
}
|
||||
debug!("updated program: {:?}", &prog);
|
||||
}
|
||||
trans.commit().unwrap();
|
||||
debug!("committed final transaction");
|
||||
}
|
||||
}
|
@ -1,39 +0,0 @@
use rusqlite::{
    types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, Value, ValueRef},
    ToSql,
};
use serde::{Deserialize, Serialize};

pub struct SqlJson<T>(pub T);

impl<T> SqlJson<T> {
    pub fn into_inner(self) -> T {
        self.0
    }
}

impl<T> FromSql for SqlJson<T>
where
    for<'de> T: Deserialize<'de>,
{
    fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
        if let ValueRef::Text(text) = value {
            let deser_value: T =
                serde_json::from_slice(text).map_err(|err| FromSqlError::Other(Box::new(err)))?;
            Ok(SqlJson(deser_value))
        } else {
            Err(FromSqlError::InvalidType)
        }
    }
}

impl<T> ToSql for SqlJson<T>
where
    T: Serialize,
{
    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
        serde_json::to_string(&self.0)
            .map(|serialized| ToSqlOutput::Owned(Value::Text(serialized)))
            .map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))
    }
}
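
A short sketch of what the SqlJson wrapper buys: any Serialize/Deserialize type can be written to a TEXT column and read back, as program.rs does for Schedule and ProgramSequence. The table and payload type here are hypothetical, and the snippet would live inside the crate since sql_json is a private module:

use eyre::Result;
use rusqlite::{params, Connection, NO_PARAMS};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Tags(Vec<String>); // hypothetical payload type

fn round_trip() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE t (data TEXT NOT NULL);")?;
    // ToSql: the value is serialized to a JSON string on the way in.
    conn.execute(
        "INSERT INTO t (data) VALUES (?1);",
        params![SqlJson(Tags(vec!["a".into(), "b".into()]))],
    )?;
    // FromSql: the TEXT column is deserialized back on the way out.
    let tags: SqlJson<Tags> =
        conn.query_row("SELECT data FROM t;", NO_PARAMS, |row| row.get(0))?;
    assert_eq!(tags.into_inner().0, vec!["a", "b"]);
    Ok(())
}
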
@ -1,16 +0,0 @@
use sprinklers_core::model::Zone;

use rusqlite::{Error as SqlError, Row as SqlRow, ToSql};

pub fn from_sql<'a>(row: &SqlRow<'a>) -> Result<Zone, SqlError> {
    Ok(Zone {
        id: row.get(0)?,
        name: row.get(1)?,
        interface_id: row.get(2)?,
    })
}

#[allow(dead_code)]
pub fn as_sql(zone: &Zone) -> Vec<&dyn ToSql> {
    vec![&zone.id, &zone.name, &zone.interface_id]
}
@ -1,14 +0,0 @@
[package]
name = "sprinklers_linux"
version = "0.1.0"
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
sprinklers_core = { path = "../sprinklers_core" }
gpio-cdev = "0.4.0"
tracing = "0.1.21"
serde = { version = "1.0.116", features = ["derive"] }
eyre = "0.6.0"
@ -1,75 +0,0 @@
|
||||
use sprinklers_core::zone_interface::{ZoneInterface, ZoneNum};
|
||||
|
||||
use eyre::WrapErr;
|
||||
use gpio_cdev::{LineHandle, LineRequestFlags};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{error, trace, warn};
|
||||
|
||||
pub struct LinuxGpio {
|
||||
lines: Vec<LineHandle>,
|
||||
}
|
||||
|
||||
impl ZoneInterface for LinuxGpio {
|
||||
fn num_zones(&self) -> ZoneNum {
|
||||
self.lines.len() as ZoneNum
|
||||
}
|
||||
|
||||
fn set_zone_state(&self, id: ZoneNum, running: bool) {
|
||||
if let Some(line) = &self.lines.get(id as usize) {
|
||||
trace!(
|
||||
line = line.line().offset(),
|
||||
id,
|
||||
running,
|
||||
"setting state of line"
|
||||
);
|
||||
if let Err(err) = line.set_value(running as u8) {
|
||||
error!("error setting GPIO line value: {}", err);
|
||||
}
|
||||
} else {
|
||||
warn!("set_zone_state: invalid zone id: {}", id);
|
||||
}
|
||||
}
|
||||
|
||||
fn get_zone_state(&self, id: ZoneNum) -> bool {
|
||||
if let Some(line) = &self.lines.get(id as usize) {
|
||||
match line.get_value() {
|
||||
Ok(active) => active != 0,
|
||||
Err(err) => {
|
||||
error!("error getting GPIO line value: {}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("get_zone_state: invalid zone id: {}", id);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct LinuxGpioConfig {
|
||||
chip_path: String,
|
||||
line_offsets: Vec<u32>,
|
||||
}
|
||||
|
||||
impl LinuxGpioConfig {
|
||||
pub fn build(self) -> eyre::Result<LinuxGpio> {
|
||||
let mut chip =
|
||||
gpio_cdev::Chip::new(self.chip_path).wrap_err("could not create gpio_cdev Chip")?;
|
||||
let lines: Result<Vec<_>, eyre::Report> = self
|
||||
.line_offsets
|
||||
.into_iter()
|
||||
.map(|line_offset| {
|
||||
let line = chip
|
||||
.get_line(line_offset)
|
||||
.wrap_err("could not get line for chip")?;
|
||||
let line_handle = line
|
||||
.request(LineRequestFlags::OUTPUT, 0, "sprinklers_rs")
|
||||
.wrap_err("could not request line access")?;
|
||||
Ok(line_handle)
|
||||
})
|
||||
.collect();
|
||||
let lines = lines?;
|
||||
Ok(LinuxGpio { lines })
|
||||
}
|
||||
}
|
@ -1,28 +0,0 @@
[package]
name = "sprinklers_mqtt"
version = "0.1.0"
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
sprinklers_core = { path = "../sprinklers_core" }
sprinklers_actors = { path = "../sprinklers_actors" }

actix = { version = "0.10.0", default-features = false }
eyre = "0.6.0"
rumqttc = "0.1.0"
tracing = "0.1.19"
serde = { version = "1.0.116", features = ["derive", "rc"] }
serde_json = "1.0.57"
chrono = "0.4.15"
num-traits = "0.2.12"
num-derive = "0.3.2"
futures-util = { version = "0.3.5", default-features = false, features = ["std", "async-await", "sink"] }
im = "15.0.0"

[dependencies.tokio]
version = "0.2.22"
default-features = false
features = []
@ -1,149 +0,0 @@
|
||||
use super::{event_loop::EventLoopTask, request, MqttInterface};
|
||||
use actix::{Actor, ActorContext, ActorFuture, AsyncContext, Handler, WrapFuture};
|
||||
use request::{ErrorCode, RequestContext, RequestError, Response, WithRequestId};
|
||||
use tokio::sync::oneshot;
|
||||
use tracing::{debug, error, info, trace, warn};
|
||||
|
||||
pub(super) struct MqttActor {
|
||||
interface: MqttInterface,
|
||||
event_loop: Option<rumqttc::EventLoop>,
|
||||
quit_rx: Option<oneshot::Receiver<()>>,
|
||||
request_context: RequestContext,
|
||||
}
|
||||
|
||||
impl MqttActor {
|
||||
pub(super) fn new(
|
||||
interface: MqttInterface,
|
||||
event_loop: rumqttc::EventLoop,
|
||||
request_context: RequestContext,
|
||||
) -> Self {
|
||||
Self {
|
||||
interface,
|
||||
event_loop: Some(event_loop),
|
||||
quit_rx: None,
|
||||
request_context,
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(&mut self, payload: &[u8], ctx: &mut <Self as Actor>::Context) {
|
||||
let request_value =
|
||||
match serde_json::from_slice::<WithRequestId<serde_json::Value>>(payload) {
|
||||
Ok(r) => r,
|
||||
Err(err) => {
|
||||
warn!("could not deserialize request: {}", err);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let rid = request_value.rid;
|
||||
let request_fut =
|
||||
serde_json::from_value::<request::Request>(request_value.rest).map(|request| {
|
||||
debug!(rid, "about to execute request: {:?}", request);
|
||||
request.execute(&mut self.request_context)
|
||||
});
|
||||
let mut interface = self.interface.clone();
|
||||
let fut = async move {
|
||||
let response = match request_fut {
|
||||
Ok(request_fut) => request_fut.await,
|
||||
Err(deser_err) => RequestError::with_name_and_cause(
|
||||
ErrorCode::Parse,
|
||||
"could not parse request",
|
||||
"request",
|
||||
deser_err,
|
||||
)
|
||||
.into(),
|
||||
};
|
||||
match &response {
|
||||
Response::Success(res) => {
|
||||
debug!(rid, response = display(res), "success response:");
|
||||
}
|
||||
Response::Error(err) => {
|
||||
debug!(rid, "request error: {}", err);
|
||||
}
|
||||
};
|
||||
let resp_with_id = WithRequestId::<request::Response> {
|
||||
rid,
|
||||
rest: response,
|
||||
};
|
||||
if let Err(err) = interface.publish_response(resp_with_id).await {
|
||||
error!("could not publish request response: {:?}", err);
|
||||
}
|
||||
};
|
||||
ctx.spawn(fut.into_actor(self));
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for MqttActor {
|
||||
type Context = actix::Context<Self>;
|
||||
|
||||
fn started(&mut self, ctx: &mut Self::Context) {
|
||||
trace!("MqttActor starting");
|
||||
let event_loop = self.event_loop.take().expect("MqttActor already started");
|
||||
let (quit_tx, quit_rx) = oneshot::channel();
|
||||
ctx.spawn(
|
||||
EventLoopTask::new(event_loop, ctx.address(), quit_tx)
|
||||
.run()
|
||||
.into_actor(self),
|
||||
);
|
||||
self.quit_rx = Some(quit_rx);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(actix::Message)]
|
||||
#[rtype(result = "()")]
|
||||
pub(super) struct Quit;
|
||||
|
||||
impl Handler<Quit> for MqttActor {
|
||||
type Result = actix::ResponseActFuture<Self, ()>;
|
||||
fn handle(&mut self, _msg: Quit, _ctx: &mut Self::Context) -> Self::Result {
|
||||
let mut interface = self.interface.clone();
|
||||
let quit_rx = self.quit_rx.take().expect("MqttActor has already quit!");
|
||||
let fut = async move {
|
||||
interface
|
||||
.cancel()
|
||||
.await
|
||||
.expect("could not cancel MQTT client");
|
||||
let _ = quit_rx.await;
|
||||
}
|
||||
.into_actor(self)
|
||||
.map(|_, _, ctx| ctx.stop());
|
||||
Box::pin(fut)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(actix::Message)]
|
||||
#[rtype(result = "()")]
|
||||
pub(super) struct Connected;
|
||||
|
||||
impl Handler<Connected> for MqttActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, _msg: Connected, ctx: &mut Self::Context) -> Self::Result {
|
||||
info!("MQTT connected");
|
||||
let mut interface = self.interface.clone();
|
||||
let fut = async move {
|
||||
let res = interface.publish_connected(true).await;
|
||||
let res = res.and(interface.subscribe_requests().await);
|
||||
if let Err(err) = res {
|
||||
error!("error in connection setup: {:?}", err);
|
||||
}
|
||||
};
|
||||
ctx.spawn(fut.into_actor(self));
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(actix::Message)]
|
||||
#[rtype(result = "()")]
|
||||
pub(super) struct PubRecieve(pub(super) rumqttc::Publish);
|
||||
|
||||
impl Handler<PubRecieve> for MqttActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, msg: PubRecieve, ctx: &mut Self::Context) -> Self::Result {
|
||||
let topic = &msg.0.topic;
|
||||
if topic == &self.interface.topics.requests() {
|
||||
self.handle_request(msg.0.payload.as_ref(), ctx);
|
||||
} else {
|
||||
warn!("received on unknown topic: {:?}", topic);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
use super::actor::{self, MqttActor};
|
||||
use actix::Addr;
|
||||
use rumqttc::{Packet, QoS};
|
||||
use std::{collections::HashSet, time::Duration};
|
||||
use tokio::sync::oneshot;
|
||||
use tracing::{debug, trace, warn};
|
||||
|
||||
pub(super) struct EventLoopTask {
|
||||
event_loop: rumqttc::EventLoop,
|
||||
mqtt_addr: Addr<MqttActor>,
|
||||
quit_tx: oneshot::Sender<()>,
|
||||
unreleased_pubs: HashSet<u16>,
|
||||
}
|
||||
|
||||
impl EventLoopTask {
|
||||
pub(super) fn new(
|
||||
event_loop: rumqttc::EventLoop,
|
||||
mqtt_addr: Addr<MqttActor>,
|
||||
quit_tx: oneshot::Sender<()>,
|
||||
) -> Self {
|
||||
Self {
|
||||
event_loop,
|
||||
mqtt_addr,
|
||||
quit_tx,
|
||||
unreleased_pubs: HashSet::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_incoming(&mut self, incoming: Packet) {
|
||||
trace!(incoming = debug(&incoming), "MQTT incoming message");
|
||||
#[allow(clippy::single_match)]
|
||||
match incoming {
|
||||
Packet::ConnAck(_) => {
|
||||
self.mqtt_addr.do_send(actor::Connected);
|
||||
}
|
||||
Packet::Publish(publish) => {
|
||||
// Only deliver QoS 2 packets once
|
||||
let deliver = if publish.qos == QoS::ExactlyOnce {
|
||||
if self.unreleased_pubs.contains(&publish.pkid) {
|
||||
false
|
||||
} else {
|
||||
self.unreleased_pubs.insert(publish.pkid);
|
||||
true
|
||||
}
|
||||
} else {
|
||||
true
|
||||
};
|
||||
if deliver {
|
||||
self.mqtt_addr.do_send(actor::PubRecieve(publish));
|
||||
}
|
||||
}
|
||||
Packet::PubRel(pubrel) => {
|
||||
self.unreleased_pubs.remove(&pubrel.pkid);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) async fn run(mut self) {
|
||||
use rumqttc::{ConnectionError, Event};
|
||||
let reconnect_timeout = Duration::from_secs(5);
|
||||
self.event_loop.set_reconnection_delay(reconnect_timeout);
|
||||
loop {
|
||||
match self.event_loop.poll().await {
|
||||
Ok(Event::Incoming(incoming)) => {
|
||||
self.handle_incoming(incoming);
|
||||
}
|
||||
Ok(Event::Outgoing(outgoing)) => {
|
||||
trace!(outgoing = debug(&outgoing), "MQTT outgoing message");
|
||||
}
|
||||
Err(ConnectionError::Cancel) => {
|
||||
debug!("MQTT disconnecting");
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("MQTT error, reconnecting: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
let _ = self.quit_tx.send(());
|
||||
}
|
||||
}
|
@ -1,255 +0,0 @@
|
||||
mod actor;
|
||||
mod event_loop;
|
||||
mod request;
|
||||
mod topics;
|
||||
mod update_listener;
|
||||
mod zone_runner_json;
|
||||
|
||||
pub use request::RequestContext;
|
||||
pub use update_listener::UpdateListener;
|
||||
|
||||
use self::topics::{CollectionTopics, Topics};
|
||||
use sprinklers_actors::zone_runner::ZoneRunnerState;
|
||||
use sprinklers_core::model::{ProgramId, Programs, ZoneId, Zones};
|
||||
use zone_runner_json::ZoneRunnerStateJson;
|
||||
|
||||
use actix::{Actor, Addr};
|
||||
use eyre::WrapErr;
|
||||
use rumqttc::{LastWill, MqttOptions, QoS};
|
||||
use std::{
|
||||
marker::PhantomData,
|
||||
ops::{Deref, DerefMut},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Options {
|
||||
pub broker_host: String,
|
||||
pub broker_port: u16,
|
||||
pub device_id: String,
|
||||
pub client_id: String,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct MqttInterface {
|
||||
client: rumqttc::AsyncClient,
|
||||
topics: Topics<Arc<str>>,
|
||||
}
|
||||
|
||||
impl MqttInterface {
|
||||
fn new(options: Options) -> (Self, rumqttc::EventLoop) {
|
||||
let mqtt_prefix = format!("devices/{}", options.device_id);
|
||||
let topics: Topics<Arc<str>> = Topics::new(mqtt_prefix.into());
|
||||
let mut mqtt_opts =
|
||||
MqttOptions::new(options.client_id, options.broker_host, options.broker_port);
|
||||
|
||||
let last_will = LastWill::new(topics.connected(), "false", QoS::AtLeastOnce, true);
|
||||
mqtt_opts.set_last_will(last_will);
|
||||
|
||||
let (client, event_loop) = rumqttc::AsyncClient::new(mqtt_opts, 16);
|
||||
|
||||
(Self { client, topics }, event_loop)
|
||||
}
|
||||
|
||||
async fn publish_data<P>(&mut self, topic: String, payload: &P) -> eyre::Result<()>
|
||||
where
|
||||
P: serde::Serialize,
|
||||
{
|
||||
let payload_vec =
|
||||
serde_json::to_vec(payload).wrap_err("failed to serialize publish payload")?;
|
||||
self.client
|
||||
.publish(topic, QoS::AtLeastOnce, true, payload_vec)
|
||||
.await
|
||||
.wrap_err("failed to publish")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn publish_connected(&mut self, connected: bool) -> eyre::Result<()> {
|
||||
self.publish_data(self.topics.connected(), &connected)
|
||||
.await
|
||||
.wrap_err("failed to publish connected topic")
|
||||
}
|
||||
|
||||
async fn cancel(&mut self) -> Result<(), rumqttc::ClientError> {
|
||||
self.client.cancel().await
|
||||
}
|
||||
|
||||
pub fn zones(&mut self) -> MqttCollection<'_, topics::ZoneTopics, Zones> {
|
||||
MqttCollection::new(self)
|
||||
}
|
||||
|
||||
pub fn programs(&mut self) -> MqttCollection<'_, topics::ProgramTopics, Programs> {
|
||||
MqttCollection::new(self)
|
||||
}
|
||||
|
||||
pub async fn publish_zone_runner(&mut self, sr_state: &ZoneRunnerState) -> eyre::Result<()> {
|
||||
let json: ZoneRunnerStateJson = sr_state.into();
|
||||
self.publish_data(self.topics.zone_runner(), &json)
|
||||
.await
|
||||
.wrap_err("failed to publish zone runner")
|
||||
}
|
||||
|
||||
async fn publish_response(&mut self, resp: request::ResponseWithId) -> eyre::Result<()> {
|
||||
let payload_vec =
|
||||
serde_json::to_vec(&resp).wrap_err("failed to serialize request response")?;
|
||||
// TODO: if couldn't serialize, just in case can have a static response
|
||||
self.client
|
||||
.publish(self.topics.responses(), QoS::AtMostOnce, false, payload_vec)
|
||||
.await
|
||||
.wrap_err("failed to publish request response")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn subscribe_requests(&mut self) -> eyre::Result<()> {
|
||||
self.client
|
||||
.subscribe(self.topics.requests(), QoS::ExactlyOnce)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MqttCollection<'a, T, U> {
|
||||
client: &'a mut rumqttc::AsyncClient,
|
||||
topics: T,
|
||||
collection: PhantomData<U>,
|
||||
}
|
||||
|
||||
impl<'a, T: CollectionTopics<'a>, U> MqttCollection<'a, T, U> {
|
||||
fn new(interface: &'a mut MqttInterface) -> Self {
|
||||
Self {
|
||||
client: &mut interface.client,
|
||||
topics: T::new(interface.topics.prefix()),
|
||||
collection: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
async fn publish<P: serde::Serialize>(
|
||||
&mut self,
|
||||
topic: String,
|
||||
payload: &P,
|
||||
) -> eyre::Result<()> {
|
||||
let payload_vec =
|
||||
serde_json::to_vec(payload).wrap_err("failed to serialize publish payload")?;
|
||||
self.client
|
||||
.publish(topic, QoS::AtLeastOnce, true, payload_vec)
|
||||
.await
|
||||
.wrap_err("failed to publish")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn publish_ids_impl(&mut self, ids: &[u32]) -> eyre::Result<()> {
|
||||
self.publish(self.topics.ids(), &ids).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn publish_data<V: serde::Serialize>(&mut self, id: u32, item: &V) -> eyre::Result<()> {
|
||||
self.publish(self.topics.data(id), item).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: CollectionTopics<'a>, V: serde::Serialize>
|
||||
MqttCollection<'a, T, im::OrdMap<u32, Arc<V>>>
|
||||
{
|
||||
async fn publish_ids(&mut self, items: &im::OrdMap<u32, Arc<V>>) -> eyre::Result<()> {
|
||||
let ids: Vec<u32> = items.keys().cloned().collect();
|
||||
self.publish_ids_impl(&ids).await
|
||||
}
|
||||
|
||||
pub async fn publish_diff(
|
||||
&mut self,
|
||||
old_values: Option<&im::OrdMap<u32, Arc<V>>>,
|
||||
new_values: &im::OrdMap<u32, Arc<V>>,
|
||||
) -> eyre::Result<()> {
|
||||
let mut published_ids = false;
|
||||
for (id, value) in new_values {
|
||||
let new_value_different = old_values
|
||||
.and_then(|old_values| old_values.get(id))
|
||||
.map(|old_value| !Arc::ptr_eq(old_value, value));
|
||||
let publish_value = if let Some(different) = new_value_different {
|
||||
different
|
||||
} else {
|
||||
// old value does not exist
|
||||
if !published_ids {
|
||||
self.publish_ids(new_values).await?;
|
||||
published_ids = true;
|
||||
}
|
||||
true
|
||||
};
|
||||
if publish_value {
|
||||
self.publish_data(*id, &**value).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn publish_all(&mut self, values: &im::OrdMap<u32, Arc<V>>) -> eyre::Result<()> {
|
||||
self.publish_diff(None, values).await
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MqttCollection<'a, topics::ZoneTopics<'a>, Zones> {
|
||||
// Zone state can be derived from zone runner state...
|
||||
pub async fn publish_state(&mut self, zone_id: ZoneId, state: bool) -> eyre::Result<()> {
|
||||
self.publish(self.topics.state(zone_id), &state)
|
||||
.await
|
||||
.wrap_err("failed to publish zone state")
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> MqttCollection<'a, topics::ProgramTopics<'a>, Programs> {
|
||||
pub async fn publish_running(
|
||||
&mut self,
|
||||
program_id: ProgramId,
|
||||
running: bool,
|
||||
) -> eyre::Result<()> {
|
||||
self.publish(self.topics.running(program_id), &running)
|
||||
.await
|
||||
.wrap_err("failed to publish program running")
|
||||
}
|
||||
|
||||
pub async fn publish_next_run(
|
||||
&mut self,
|
||||
program_id: ProgramId,
|
||||
next_run: chrono::DateTime<chrono::Local>,
|
||||
) -> eyre::Result<()> {
|
||||
let payload = next_run.to_rfc3339();
|
||||
self.publish(self.topics.next_run(program_id), &payload)
|
||||
.await
|
||||
.wrap_err("failed to publish program next run")
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MqttInterfaceTask {
|
||||
interface: MqttInterface,
|
||||
addr: Addr<actor::MqttActor>,
|
||||
}
|
||||
|
||||
impl MqttInterfaceTask {
|
||||
pub fn start(options: Options, request_context: RequestContext) -> Self {
|
||||
let (interface, event_loop) = MqttInterface::new(options);
|
||||
|
||||
let addr = actor::MqttActor::new(interface.clone(), event_loop, request_context).start();
|
||||
|
||||
Self { interface, addr }
|
||||
}
|
||||
|
||||
pub async fn quit(self) -> eyre::Result<()> {
|
||||
self.addr.send(actor::Quit).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for MqttInterfaceTask {
|
||||
type Target = MqttInterface;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.interface
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for MqttInterfaceTask {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.interface
|
||||
}
|
||||
}
|
@ -1,297 +0,0 @@
|
||||
use sprinklers_actors::{ProgramRunner, StateManager, ZoneRunner};
|
||||
use sprinklers_core::model::Zones;
|
||||
|
||||
use futures_util::{ready, FutureExt};
|
||||
use num_derive::FromPrimitive;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{fmt, future::Future, pin::Pin, task::Poll};
|
||||
use tokio::sync::watch;
|
||||
|
||||
mod programs;
|
||||
mod zones;
|
||||
|
||||
pub struct RequestContext {
|
||||
pub zones: watch::Receiver<Zones>,
|
||||
pub zone_runner: ZoneRunner,
|
||||
pub program_runner: ProgramRunner,
|
||||
pub state_manager: StateManager,
|
||||
}
|
||||
|
||||
type BoxFuture<Output> = Pin<Box<dyn Future<Output = Output>>>;
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, FromPrimitive)]
|
||||
#[repr(u16)]
|
||||
pub enum ErrorCode {
|
||||
BadRequest = 100,
|
||||
NotSpecified = 101,
|
||||
Parse = 102,
|
||||
Range = 103,
|
||||
InvalidData = 104,
|
||||
BadToken = 105,
|
||||
Unauthorized = 106,
|
||||
NoPermission = 107,
|
||||
NotFound = 109,
|
||||
// NotUnique = 110,
|
||||
NoSuchZone = 120,
|
||||
NoSuchZoneRun = 121,
|
||||
NoSuchProgram = 122,
|
||||
Internal = 200,
|
||||
NotImplemented = 201,
|
||||
Timeout = 300,
|
||||
// ServerDisconnected = 301,
|
||||
// BrokerDisconnected = 302,
|
||||
}
|
||||
|
||||
mod ser {
|
||||
use super::ErrorCode;
|
||||
use num_traits::FromPrimitive;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
impl Serialize for ErrorCode {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_u16(*self as u16)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ErrorCode {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let prim = u16::deserialize(deserializer)?;
|
||||
ErrorCode::from_u16(prim)
|
||||
.ok_or_else(|| <D::Error as serde::de::Error>::custom("invalid ErrorCode"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase", tag = "result")]
|
||||
pub struct RequestError {
|
||||
code: ErrorCode,
|
||||
message: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
name: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
cause: Option<String>,
|
||||
}
|
||||
|
||||
impl fmt::Display for RequestError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "request error (code {:?}", self.code)?;
|
||||
if let Some(name) = &self.name {
|
||||
write!(f, "on {}", name)?;
|
||||
}
|
||||
write!(f, "): {}", self.message)?;
|
||||
if let Some(cause) = &self.cause {
|
||||
write!(f, ", caused by {}", cause)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for RequestError {}
|
||||
|
||||
impl From<eyre::Report> for RequestError {
|
||||
fn from(report: eyre::Report) -> Self {
|
||||
let mut chain = report.chain();
|
||||
let message = match chain.next() {
|
||||
Some(a) => a.to_string(),
|
||||
None => "unknown error".to_string(),
|
||||
};
|
||||
let cause = chain.fold(None, |cause, err| match cause {
|
||||
Some(cause) => Some(format!("{}: {}", cause, err)),
|
||||
None => Some(err.to_string()),
|
||||
});
|
||||
RequestError::new(ErrorCode::Internal, message, None, cause)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl RequestError {
|
||||
pub fn new<M>(code: ErrorCode, message: M, name: Option<String>, cause: Option<String>) -> Self
|
||||
where
|
||||
M: ToString,
|
||||
{
|
||||
Self {
|
||||
code,
|
||||
message: message.to_string(),
|
||||
name,
|
||||
cause,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn simple<M>(code: ErrorCode, message: M) -> Self
|
||||
where
|
||||
M: ToString,
|
||||
{
|
||||
Self::new(code, message, None, None)
|
||||
}
|
||||
|
||||
pub fn with_name<M, N>(code: ErrorCode, message: M, name: N) -> Self
|
||||
where
|
||||
M: ToString,
|
||||
N: ToString,
|
||||
{
|
||||
Self::new(code, message, Some(name.to_string()), None)
|
||||
}
|
||||
|
||||
pub fn with_cause<M, C>(code: ErrorCode, message: M, cause: C) -> Self
|
||||
where
|
||||
M: ToString,
|
||||
C: ToString,
|
||||
{
|
||||
Self::new(code, message, None, Some(cause.to_string()))
|
||||
}
|
||||
|
||||
pub fn with_name_and_cause<M, N, C>(code: ErrorCode, message: M, name: N, cause: C) -> Self
|
||||
where
|
||||
M: ToString,
|
||||
N: ToString,
|
||||
C: ToString,
|
||||
{
|
||||
Self::new(
|
||||
code,
|
||||
message,
|
||||
Some(name.to_string()),
|
||||
Some(cause.to_string()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct ResponseMessage {
|
||||
message: String,
|
||||
}
|
||||
|
||||
impl ResponseMessage {
|
||||
fn new<M>(message: M) -> Self
|
||||
where
|
||||
M: ToString,
|
||||
{
|
||||
ResponseMessage {
|
||||
message: message.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for ResponseMessage {
|
||||
fn from(message: String) -> Self {
|
||||
ResponseMessage { message }
|
||||
}
|
||||
}
|
||||
|
||||
pub type ResponseValue = serde_json::Value;
|
||||
|
||||
type RequestResult<Ok = ResponseValue> = Result<Ok, RequestError>;
|
||||
type RequestFuture<Ok = ResponseValue> = BoxFuture<RequestResult<Ok>>;
|
||||
|
||||
trait IRequest {
|
||||
type Response: Serialize;
|
||||
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response>;
|
||||
|
||||
fn exec_erased(self, ctx: &mut RequestContext) -> RequestFuture
|
||||
where
|
||||
Self::Response: 'static,
|
||||
Self: Sized,
|
||||
{
|
||||
// TODO: figure out how to get rid of this nested box
|
||||
Box::pin(ErasedRequestFuture(self.exec(ctx)))
|
||||
}
|
||||
}
|
||||
|
||||
struct ErasedRequestFuture<Response>(RequestFuture<Response>)
|
||||
where
|
||||
Response: Serialize;
|
||||
|
||||
impl<Response> Future for ErasedRequestFuture<Response>
|
||||
where
|
||||
Response: Serialize,
|
||||
{
|
||||
type Output = RequestResult;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
|
||||
use eyre::WrapErr;
|
||||
let response = ready!(self.as_mut().0.poll_unpin(cx));
|
||||
Poll::Ready(response.and_then(|res| {
|
||||
serde_json::to_value(res)
|
||||
.wrap_err("could not serialize response")
|
||||
.map_err(RequestError::from)
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase", tag = "result")]
|
||||
pub enum Response {
|
||||
Success(ResponseValue),
|
||||
Error(RequestError),
|
||||
}
|
||||
|
||||
impl From<RequestResult> for Response {
|
||||
fn from(res: RequestResult) -> Self {
|
||||
match res {
|
||||
Ok(value) => Response::Success(value),
|
||||
Err(error) => Response::Error(error),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<RequestError> for Response {
|
||||
fn from(error: RequestError) -> Self {
|
||||
Response::Error(error)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct WithRequestId<T> {
|
||||
pub rid: i32,
|
||||
#[serde(flatten)]
|
||||
pub rest: T,
|
||||
}
|
||||
|
||||
pub type ResponseWithId = WithRequestId<Response>;
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase", tag = "type")]
|
||||
pub enum Request {
|
||||
// TODO: update nomenclature
|
||||
#[serde(rename = "runSection")]
|
||||
RunZone(zones::RunZoneRequest),
|
||||
#[serde(rename = "cancelSection")]
|
||||
CancelZone(zones::CancelZoneRequest),
|
||||
#[serde(rename = "cancelSectionRunId")]
|
||||
CancelZoneRunId(zones::CancelZoneRunIdRequest),
|
||||
#[serde(rename = "pauseSectionRunner")]
|
||||
PauseZoneRunner(zones::PauseZoneRunnerRequest),
|
||||
RunProgram(programs::RunProgramRequest),
|
||||
CancelProgram(programs::CancelProgramRequest),
|
||||
UpdateProgram(programs::UpdateProgramRequest),
|
||||
}
|
||||
|
||||
impl IRequest for Request {
|
||||
type Response = ResponseValue;
|
||||
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture {
|
||||
match self {
|
||||
Request::RunZone(req) => req.exec_erased(ctx),
|
||||
Request::CancelZone(req) => req.exec_erased(ctx),
|
||||
Request::CancelZoneRunId(req) => req.exec_erased(ctx),
|
||||
Request::PauseZoneRunner(req) => req.exec_erased(ctx),
|
||||
Request::RunProgram(req) => req.exec_erased(ctx),
|
||||
Request::CancelProgram(req) => req.exec_erased(ctx),
|
||||
Request::UpdateProgram(req) => req.exec_erased(ctx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Request {
|
||||
pub fn execute(self, ctx: &mut RequestContext) -> impl Future<Output = Response> {
|
||||
self.exec(ctx).map(Response::from)
|
||||
}
|
||||
}
|
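
The serde attributes above pin down the request wire format: an rid plus the flattened request, tagged by "type" (with the old "section" names still used on the wire). A hedged sketch of parsing one, mirroring the two-step deserialization MqttActor performs; the rid and field values are made up:

#[test]
fn parse_run_zone_request() {
    // Values are made up; the JSON shape follows the serde attributes above.
    let payload = br#"{"rid": 7, "type": "runSection", "sectionId": 1, "duration": 30.0}"#;
    let with_id: WithRequestId<serde_json::Value> = serde_json::from_slice(payload).unwrap();
    assert_eq!(with_id.rid, 7);
    let request: Request = serde_json::from_value(with_id.rest).unwrap();
    assert!(matches!(request, Request::RunZone(_)));
}
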
@ -1,106 +0,0 @@
|
||||
use super::*;
|
||||
use sprinklers_actors::{program_runner::Error, state_manager::StateError};
|
||||
use sprinklers_core::model::{ProgramId, ProgramRef, ProgramUpdateData};
|
||||
|
||||
use eyre::WrapErr;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RunProgramRequest {
|
||||
program_id: ProgramId,
|
||||
}
|
||||
|
||||
impl IRequest for RunProgramRequest {
|
||||
type Response = ResponseMessage;
|
||||
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut program_runner = ctx.program_runner.clone();
|
||||
let program_id = self.program_id;
|
||||
Box::pin(async move {
|
||||
match program_runner.run_program_id(program_id).await {
|
||||
Ok(program) => Ok(ResponseMessage::new(format!(
|
||||
"running program '{}'",
|
||||
program.name
|
||||
))),
|
||||
Err(e @ Error::InvalidProgramId(_)) => Err(RequestError::with_name(
|
||||
ErrorCode::NoSuchProgram,
|
||||
e,
|
||||
"program",
|
||||
)),
|
||||
Err(e) => Err(e).wrap_err("could not run program")?,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelProgramRequest {
|
||||
program_id: ProgramId,
|
||||
}
|
||||
|
||||
impl IRequest for CancelProgramRequest {
|
||||
type Response = ResponseMessage;
|
||||
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut program_runner = ctx.program_runner.clone();
|
||||
let program_id = self.program_id;
|
||||
Box::pin(async move {
|
||||
let cancelled = program_runner
|
||||
.cancel_program(program_id)
|
||||
.await
|
||||
.wrap_err("could not run cancel program")?;
|
||||
match cancelled {
|
||||
Some(program) => Ok(ResponseMessage::new(format!(
|
||||
"program '{}' cancelled",
|
||||
program.name
|
||||
))),
|
||||
None => Err(RequestError::with_name(
|
||||
ErrorCode::NoSuchProgram,
|
||||
"program was not running",
|
||||
"program",
|
||||
)),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UpdateProgramRequest {
|
||||
program_id: ProgramId,
|
||||
data: ProgramUpdateData,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UpdateProgramResponse {
|
||||
message: String,
|
||||
data: ProgramRef,
|
||||
}
|
||||
|
||||
impl IRequest for UpdateProgramRequest {
|
||||
type Response = UpdateProgramResponse;
|
||||
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut state_manager = ctx.state_manager.clone();
|
||||
Box::pin(async move {
|
||||
let new_program = state_manager
|
||||
.update_program(self.program_id, self.data)
|
||||
.await
|
||||
.map_err(|err| match err {
|
||||
e @ StateError::NoSuchProgram(_) => RequestError::with_name_and_cause(
|
||||
ErrorCode::NoSuchProgram,
|
||||
"could not update program",
|
||||
"program",
|
||||
e,
|
||||
),
|
||||
e => RequestError::from(eyre::Report::from(e)),
|
||||
})?;
|
||||
Ok(UpdateProgramResponse {
|
||||
message: format!("updated program '{}'", new_program.name),
|
||||
data: new_program,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
@ -1,160 +0,0 @@
|
||||
use super::*;
|
||||
use sprinklers_actors::zone_runner::ZoneRunHandle;
|
||||
use sprinklers_core::model::{self, ZoneRef};
|
||||
use sprinklers_core::serde::duration_secs;
|
||||
|
||||
use eyre::WrapErr;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct ZoneId(pub model::ZoneId);
|
||||
|
||||
impl ZoneId {
|
||||
fn get_zone(self, zones: &Zones) -> Result<ZoneRef, RequestError> {
|
||||
zones
|
||||
.get(&self.0)
|
||||
.cloned()
|
||||
.ok_or_else(|| RequestError::with_name(ErrorCode::NoSuchZone, "no such zone", "zone"))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RunZoneRequest {
|
||||
// TODO: update nomenclature
|
||||
#[serde(rename = "sectionId")]
|
||||
pub zone_id: ZoneId,
|
||||
#[serde(with = "duration_secs")]
|
||||
pub duration: Duration,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RunZoneResponse {
|
||||
pub message: String,
|
||||
pub run_id: ZoneRunHandle,
|
||||
}
|
||||
|
||||
impl IRequest for RunZoneRequest {
|
||||
type Response = RunZoneResponse;
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut zone_runner = ctx.zone_runner.clone();
|
||||
let zone = self.zone_id.get_zone(&*ctx.zones.borrow());
|
||||
let duration = self.duration;
|
||||
Box::pin(async move {
|
||||
let zone = zone?;
|
||||
let handle = zone_runner
|
||||
.queue_run(zone.clone(), duration)
|
||||
.await
|
||||
.wrap_err("could not queue run")?;
|
||||
Ok(RunZoneResponse {
|
||||
message: format!("running zone '{}' for {:?}", &zone.name, duration),
|
||||
run_id: handle,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelZoneRequest {
|
||||
// TODO: update nomenclature
|
||||
#[serde(rename = "sectionId")]
|
||||
pub zone_id: ZoneId,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelZoneResponse {
|
||||
pub message: String,
|
||||
pub cancelled: usize,
|
||||
}
|
||||
|
||||
impl IRequest for CancelZoneRequest {
|
||||
type Response = CancelZoneResponse;
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut zone_runner = ctx.zone_runner.clone();
|
||||
let zone = self.zone_id.get_zone(&*ctx.zones.borrow());
|
||||
Box::pin(async move {
|
||||
let zone = zone?;
|
||||
let cancelled = zone_runner
|
||||
.cancel_by_zone(zone.id)
|
||||
.await
|
||||
.wrap_err("could not cancel zone")?;
|
||||
Ok(CancelZoneResponse {
|
||||
message: format!("cancelled {} runs for zone '{}'", cancelled, zone.name),
|
||||
cancelled,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelZoneRunIdRequest {
|
||||
pub run_id: ZoneRunHandle,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CancelZoneRunIdResponse {
|
||||
pub message: String,
|
||||
pub cancelled: bool,
|
||||
}
|
||||
|
||||
impl IRequest for CancelZoneRunIdRequest {
|
||||
type Response = ResponseMessage;
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut zone_runner = ctx.zone_runner.clone();
|
||||
Box::pin(async move {
|
||||
let cancelled = zone_runner
|
||||
.cancel_run(self.run_id)
|
||||
.await
|
||||
.wrap_err("could not cancel zone run")?;
|
||||
if cancelled {
|
||||
Ok(ResponseMessage::new("cancelled zone run"))
|
||||
} else {
|
||||
Err(RequestError::with_name(
|
||||
ErrorCode::NoSuchZoneRun,
|
||||
"no such zone run",
|
||||
"zone run",
|
||||
))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct PauseZoneRunnerRequest {
|
||||
pub paused: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct PauseZoneRunnerResponse {
|
||||
pub message: String,
|
||||
pub paused: bool,
|
||||
}
|
||||
|
||||
impl IRequest for PauseZoneRunnerRequest {
|
||||
type Response = PauseZoneRunnerResponse;
|
||||
fn exec(self, ctx: &mut RequestContext) -> RequestFuture<Self::Response> {
|
||||
let mut zone_runner = ctx.zone_runner.clone();
|
||||
let paused = self.paused;
|
||||
Box::pin(async move {
|
||||
if paused {
|
||||
zone_runner.pause().await
|
||||
} else {
|
||||
zone_runner.unpause().await
|
||||
}
|
||||
.wrap_err("could not pause/unpause zone runner")?;
|
||||
Ok(PauseZoneRunnerResponse {
|
||||
message: format!("{} zone runner", if paused { "paused" } else { "unpaused" }),
|
||||
paused,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
pub trait CollectionTopics<'t> {
|
||||
fn new(prefix: &'t str) -> Self;
|
||||
fn ids(&self) -> String;
|
||||
fn data(&self, id: u32) -> String;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ZoneTopics<'a>(pub &'a str);
|
||||
|
||||
impl<'a> CollectionTopics<'a> for ZoneTopics<'a> {
|
||||
fn new(prefix: &'a str) -> Self {
|
||||
ZoneTopics(prefix)
|
||||
}
|
||||
|
||||
fn ids(&self) -> String {
|
||||
// TODO: change nomenclature
|
||||
format!("{}/sections", self.0)
|
||||
}
|
||||
|
||||
fn data(&self, zone_id: u32) -> String {
|
||||
// TODO: change nomenclature
|
||||
format!("{}/sections/{}", self.0, zone_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ZoneTopics<'a> {
|
||||
pub fn state(&self, zone_id: u32) -> String {
|
||||
// TODO: change nomenclature
|
||||
format!("{}/sections/{}/state", self.0, zone_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ProgramTopics<'a>(pub &'a str);
|
||||
|
||||
impl<'a> CollectionTopics<'a> for ProgramTopics<'a> {
|
||||
fn new(prefix: &'a str) -> Self {
|
||||
ProgramTopics(prefix)
|
||||
}
|
||||
|
||||
fn ids(&self) -> String {
|
||||
format!("{}/programs", self.0)
|
||||
}
|
||||
|
||||
fn data(&self, zone_id: u32) -> String {
|
||||
format!("{}/programs/{}", self.0, zone_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ProgramTopics<'a> {
|
||||
pub fn running(&self, zone_id: u32) -> String {
|
||||
format!("{}/programs/{}/running", self.0, zone_id)
|
||||
}
|
||||
|
||||
pub fn next_run(&self, zone_id: u32) -> String {
|
||||
// TODO: reconcile naming convention
|
||||
format!("{}/programs/{}/nextRun", self.0, zone_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Topics<T: AsRef<str>>(pub T);
|
||||
|
||||
impl<T: AsRef<str>> Topics<T> {
|
||||
pub fn new(prefix: T) -> Self {
|
||||
Self(prefix)
|
||||
}
|
||||
|
||||
pub fn prefix(&self) -> &str {
|
||||
self.0.as_ref()
|
||||
}
|
||||
|
||||
pub fn connected(&self) -> String {
|
||||
format!("{}/connected", self.0.as_ref())
|
||||
}
|
||||
|
||||
pub fn zones(&self) -> ZoneTopics {
|
||||
ZoneTopics::new(self.0.as_ref())
|
||||
}
|
||||
|
||||
pub fn programs(&self) -> ProgramTopics {
|
||||
ProgramTopics::new(self.0.as_ref())
|
||||
}
|
||||
|
||||
pub fn zone_runner(&self) -> String {
|
||||
// TODO: change nomenclature
|
||||
format!("{}/section_runner", self.0.as_ref())
|
||||
}
|
||||
|
||||
pub fn requests(&self) -> String {
|
||||
format!("{}/requests", self.0.as_ref())
|
||||
}
|
||||
|
||||
pub fn responses(&self) -> String {
|
||||
format!("{}/responses", self.0.as_ref())
|
||||
}
|
||||
}
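Taken together these helpers just join a device prefix with fixed suffixes. A minimal usage sketch, assuming the helpers above; the prefix string is a made-up example, not a value from the codebase:
// Sketch only; the prefix value below is hypothetical.
let topics = Topics::new("sprinklers/device-1");
assert_eq!(topics.connected(), "sprinklers/device-1/connected");
assert_eq!(topics.zones().state(3), "sprinklers/device-1/sections/3/state");
assert_eq!(topics.programs().next_run(2), "sprinklers/device-1/programs/2/nextRun");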
|
@ -1,295 +0,0 @@
|
||||
use super::MqttInterface;
|
||||
use sprinklers_actors::{
|
||||
program_runner::{ProgramEvent, ProgramEventRecv},
|
||||
zone_runner::{ZoneEvent, ZoneEventRecv, ZoneRunnerState, ZoneRunnerStateRecv},
|
||||
};
|
||||
|
||||
use actix::{fut::wrap_future, Actor, ActorContext, Addr, AsyncContext, Handler, StreamHandler};
|
||||
use futures_util::TryFutureExt;
|
||||
use sprinklers_core::model::{Programs, Zones};
|
||||
use tokio::sync::{broadcast, watch};
|
||||
use tracing::{trace, warn};
|
||||
|
||||
struct UpdateListenerActor {
|
||||
mqtt_interface: MqttInterface,
|
||||
old_zones: Option<Zones>,
|
||||
old_programs: Option<Programs>,
|
||||
}
|
||||
|
||||
impl UpdateListenerActor {
|
||||
fn new(mqtt_interface: MqttInterface) -> Self {
|
||||
Self {
|
||||
mqtt_interface,
|
||||
old_zones: None,
|
||||
old_programs: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for UpdateListenerActor {
|
||||
type Context = actix::Context<Self>;
|
||||
|
||||
fn started(&mut self, _ctx: &mut Self::Context) {
|
||||
trace!("starting UpdateListener");
|
||||
}
|
||||
|
||||
fn stopped(&mut self, _ctx: &mut Self::Context) {
|
||||
trace!("stopped UpdateListener")
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamHandler<Zones> for UpdateListenerActor {
|
||||
fn handle(&mut self, zones: Zones, ctx: &mut Self::Context) {
|
||||
let mut mqtt_interface = self.mqtt_interface.clone();
|
||||
|
||||
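// Keep the previous snapshot so only the changes are published as a diff.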
let old_zones = self.old_zones.replace(zones.clone());
|
||||
|
||||
let fut = async move {
|
||||
if old_zones.is_none() {
|
||||
// Somewhat of a hack:
|
||||
// Initialize zone running states to false the first time we
|
||||
// receive zones
|
||||
for zone_id in zones.keys() {
|
||||
mqtt_interface
|
||||
.zones()
|
||||
.publish_state(*zone_id, false)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
mqtt_interface
|
||||
.zones()
|
||||
.publish_diff(old_zones.as_ref(), &zones)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
.unwrap_or_else(|err: eyre::Report| warn!("could not publish programs: {:?}", err));
|
||||
ctx.spawn(wrap_future(fut));
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamHandler<Result<ZoneEvent, broadcast::RecvError>> for UpdateListenerActor {
|
||||
fn handle(&mut self, event: Result<ZoneEvent, broadcast::RecvError>, ctx: &mut Self::Context) {
|
||||
let event = match event {
|
||||
Ok(ev) => ev,
|
||||
Err(broadcast::RecvError::Closed) => unreachable!(),
|
||||
Err(broadcast::RecvError::Lagged(n)) => {
|
||||
warn!("zone events lagged by {}", n);
|
||||
return;
|
||||
}
|
||||
};
|
||||
if let Some((zone_id, state)) = match event {
|
||||
ZoneEvent::RunStart(_, zone) | ZoneEvent::RunUnpause(_, zone) => Some((zone.id, true)),
|
||||
ZoneEvent::RunFinish(_, zone)
|
||||
| ZoneEvent::RunPause(_, zone)
|
||||
| ZoneEvent::RunCancel(_, zone) => Some((zone.id, false)),
|
||||
ZoneEvent::RunnerPause | ZoneEvent::RunnerUnpause => None,
|
||||
} {
|
||||
let mut mqtt_interface = self.mqtt_interface.clone();
|
||||
let fut = async move {
|
||||
if let Err(err) = mqtt_interface.zones().publish_state(zone_id, state).await {
|
||||
warn!("could not publish zone state: {}", err);
|
||||
}
|
||||
};
|
||||
ctx.spawn(wrap_future(fut));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamHandler<Result<ProgramEvent, broadcast::RecvError>> for UpdateListenerActor {
|
||||
fn handle(
|
||||
&mut self,
|
||||
event: Result<ProgramEvent, broadcast::RecvError>,
|
||||
ctx: &mut Self::Context,
|
||||
) {
|
||||
let event = match event {
|
||||
Ok(ev) => ev,
|
||||
Err(broadcast::RecvError::Closed) => unreachable!(),
|
||||
Err(broadcast::RecvError::Lagged(n)) => {
|
||||
warn!("program events lagged by {}", n);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let mut mqtt_interface = self.mqtt_interface.clone();
|
||||
let fut = async move {
|
||||
enum Publish {
|
||||
Running(bool),
|
||||
NextRun(chrono::DateTime<chrono::Local>),
|
||||
}
|
||||
|
||||
let (program_id, publish) = match event {
|
||||
ProgramEvent::RunStart(prog) => (prog.id, Publish::Running(true)),
|
||||
ProgramEvent::RunFinish(prog) | ProgramEvent::RunCancel(prog) => {
|
||||
(prog.id, Publish::Running(false))
|
||||
}
|
||||
ProgramEvent::NextRun(prog, next_run) => (prog.id, Publish::NextRun(next_run)),
|
||||
};
|
||||
match publish {
|
||||
Publish::Running(running) => {
|
||||
if let Err(err) = mqtt_interface
|
||||
.programs()
|
||||
.publish_running(program_id, running)
|
||||
.await
|
||||
{
|
||||
warn!("could not publish program running: {}", err);
|
||||
}
|
||||
}
|
||||
Publish::NextRun(next_run) => {
|
||||
if let Err(err) = mqtt_interface
|
||||
.programs()
|
||||
.publish_next_run(program_id, next_run)
|
||||
.await
|
||||
{
|
||||
warn!("could not publish program next run: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
ctx.spawn(wrap_future(fut));
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamHandler<ZoneRunnerState> for UpdateListenerActor {
|
||||
fn handle(&mut self, state: ZoneRunnerState, ctx: &mut Self::Context) {
|
||||
let mut mqtt_interface = self.mqtt_interface.clone();
|
||||
let fut = async move {
|
||||
if let Err(err) = mqtt_interface.publish_zone_runner(&state).await {
|
||||
warn!("could not publish zone runner: {}", err);
|
||||
}
|
||||
};
|
||||
ctx.spawn(wrap_future(fut));
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamHandler<Programs> for UpdateListenerActor {
|
||||
fn handle(&mut self, programs: Programs, ctx: &mut Self::Context) {
|
||||
let mut mqtt_interface = self.mqtt_interface.clone();
|
||||
|
||||
let old_programs = self.old_programs.replace(programs.clone());
|
||||
|
||||
let fut = async move {
|
||||
let mut mqtt_progs = mqtt_interface.programs();
|
||||
if old_programs.is_none() {
|
||||
// Somewhat of a hack:
|
||||
// Initialize program running states to false the first time we
|
||||
// receive programs
|
||||
for program_id in programs.keys() {
|
||||
mqtt_progs.publish_running(*program_id, false).await?;
|
||||
}
|
||||
}
|
||||
mqtt_progs
|
||||
.publish_diff(old_programs.as_ref(), &programs)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
.unwrap_or_else(|err: eyre::Report| warn!("could not publish programs: {:?}", err));
|
||||
ctx.spawn(wrap_future(fut));
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(actix::Message)]
|
||||
#[rtype(result = "()")]
|
||||
struct Quit;
|
||||
|
||||
impl Handler<Quit> for UpdateListenerActor {
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, _msg: Quit, ctx: &mut Self::Context) -> Self::Result {
|
||||
ctx.stop();
|
||||
}
|
||||
}
|
||||
|
||||
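// A Listenable value knows how to register itself as a stream on the actor's context.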
trait Listenable<A>: Send
|
||||
where
|
||||
A: Actor,
|
||||
{
|
||||
fn listen(self, ctx: &mut A::Context);
|
||||
}
|
||||
|
||||
#[derive(actix::Message)]
|
||||
#[rtype(result = "()")]
|
||||
struct Listen<L>(L)
|
||||
where
|
||||
L: Listenable<UpdateListenerActor>;
|
||||
|
||||
impl<L> Handler<Listen<L>> for UpdateListenerActor
|
||||
where
|
||||
L: Listenable<Self>,
|
||||
{
|
||||
type Result = ();
|
||||
|
||||
fn handle(&mut self, msg: Listen<L>, ctx: &mut Self::Context) -> Self::Result {
|
||||
msg.0.listen(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
impl Listenable<UpdateListenerActor> for watch::Receiver<Zones> {
|
||||
fn listen(self, ctx: &mut <UpdateListenerActor as Actor>::Context) {
|
||||
ctx.add_stream(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl Listenable<UpdateListenerActor> for ZoneEventRecv {
|
||||
fn listen(self, ctx: &mut <UpdateListenerActor as Actor>::Context) {
|
||||
ctx.add_stream(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl Listenable<UpdateListenerActor> for ZoneRunnerStateRecv {
|
||||
fn listen(self, ctx: &mut <UpdateListenerActor as Actor>::Context) {
|
||||
ctx.add_stream(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl Listenable<UpdateListenerActor> for watch::Receiver<Programs> {
|
||||
fn listen(self, ctx: &mut <UpdateListenerActor as Actor>::Context) {
|
||||
ctx.add_stream(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl Listenable<UpdateListenerActor> for ProgramEventRecv {
|
||||
fn listen(self, ctx: &mut <UpdateListenerActor as Actor>::Context) {
|
||||
ctx.add_stream(self);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct UpdateListener {
|
||||
addr: Addr<UpdateListenerActor>,
|
||||
}
|
||||
|
||||
impl UpdateListener {
|
||||
pub fn start(mqtt_interface: MqttInterface) -> Self {
|
||||
let addr = UpdateListenerActor::new(mqtt_interface).start();
|
||||
Self { addr }
|
||||
}
|
||||
|
||||
fn listen<L: 'static>(&mut self, listener: L)
|
||||
where
|
||||
L: Listenable<UpdateListenerActor>,
|
||||
{
|
||||
self.addr.do_send(Listen(listener));
|
||||
}
|
||||
|
||||
pub fn listen_zones(&mut self, zones: watch::Receiver<Zones>) {
|
||||
self.listen(zones);
|
||||
}
|
||||
|
||||
pub fn listen_zone_events(&mut self, zone_events: ZoneEventRecv) {
|
||||
self.listen(zone_events);
|
||||
}
|
||||
|
||||
pub fn listen_zone_runner(&mut self, zone_runner_state_recv: ZoneRunnerStateRecv) {
|
||||
self.listen(zone_runner_state_recv);
|
||||
}
|
||||
|
||||
pub fn listen_programs(&mut self, programs: watch::Receiver<Programs>) {
|
||||
self.listen(programs);
|
||||
}
|
||||
|
||||
pub fn listen_program_events(&mut self, program_events: ProgramEventRecv) {
|
||||
self.listen(program_events);
|
||||
}
|
||||
|
||||
pub async fn quit(self) -> eyre::Result<()> {
|
||||
Ok(self.addr.send(Quit).await?)
|
||||
}
|
||||
}
|
@ -1,76 +0,0 @@
|
||||
use sprinklers_actors::zone_runner::{ZoneRun, ZoneRunState, ZoneRunnerState};
|
||||
use sprinklers_core::model::ZoneId;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::Serialize;
|
||||
use std::time::SystemTime;
|
||||
use tokio::time::Instant;
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ZoneRunJson {
|
||||
id: i32,
|
||||
// TODO: change nomenclature
|
||||
#[serde(rename = "section")]
|
||||
zone: ZoneId,
|
||||
total_duration: f64,
|
||||
duration: f64,
|
||||
start_time: Option<String>,
|
||||
pause_time: Option<String>,
|
||||
unpause_time: Option<String>,
|
||||
}
|
||||
|
||||
impl ZoneRunJson {
|
||||
fn from_run(run: &ZoneRun) -> Option<Self> {
|
||||
let (now, system_now) = (Instant::now(), SystemTime::now());
|
||||
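// Approximate a wall-clock timestamp for a monotonic Instant by offsetting it from the current SystemTime.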
let instant_to_string = |instant: Instant| -> String {
|
||||
DateTime::<Utc>::from(system_now - now.duration_since(instant)).to_rfc3339()
|
||||
};
|
||||
let (start_time, pause_time) = match run.state {
|
||||
ZoneRunState::Finished | ZoneRunState::Cancelled => {
|
||||
return None;
|
||||
}
|
||||
ZoneRunState::Waiting => (None, None),
|
||||
ZoneRunState::Running { start_time } => (Some(instant_to_string(start_time)), None),
|
||||
ZoneRunState::Paused {
|
||||
start_time,
|
||||
pause_time,
|
||||
} => (
|
||||
Some(instant_to_string(start_time)),
|
||||
Some(instant_to_string(pause_time)),
|
||||
),
|
||||
};
|
||||
Some(Self {
|
||||
id: run.handle.clone().into_inner(),
|
||||
zone: run.zone.id,
|
||||
total_duration: run.total_duration.as_secs_f64(),
|
||||
duration: run.duration.as_secs_f64(),
|
||||
start_time,
|
||||
pause_time,
|
||||
unpause_time: None, // TODO: this field carries little information and should probably be removed
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ZoneRunnerStateJson {
|
||||
queue: Vec<ZoneRunJson>,
|
||||
current: Option<ZoneRunJson>,
|
||||
paused: bool,
|
||||
}
|
||||
|
||||
impl From<&ZoneRunnerState> for ZoneRunnerStateJson {
|
||||
fn from(state: &ZoneRunnerState) -> Self {
|
||||
let mut run_queue = state.run_queue.iter();
|
||||
let current = run_queue.next().and_then(|run| ZoneRunJson::from_run(run));
|
||||
let queue = run_queue
|
||||
.filter_map(|run| ZoneRunJson::from_run(run))
|
||||
.collect();
|
||||
Self {
|
||||
queue,
|
||||
current,
|
||||
paused: state.paused,
|
||||
}
|
||||
}
|
||||
}
|
@ -1,33 +0,0 @@
|
||||
[package]
|
||||
name = "sprinklers_rs"
|
||||
version = "0.1.0"
|
||||
authors = ["Alex Mikhalev <alexmikhalevalex@gmail.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = ["sprinklers_linux"]
|
||||
bundled_sqlite = ["sprinklers_database/bundled"]
|
||||
|
||||
[dependencies]
|
||||
sprinklers_core = { path = "../sprinklers_core" }
|
||||
sprinklers_database = { path = "../sprinklers_database" }
|
||||
sprinklers_actors = { path = "../sprinklers_actors" }
|
||||
sprinklers_mqtt = { path = "../sprinklers_mqtt" }
|
||||
sprinklers_linux = { path = "../sprinklers_linux", optional = true }
|
||||
|
||||
color-eyre = "0.5.1"
|
||||
eyre = "0.6.0"
|
||||
tokio = "0.2.22"
|
||||
tracing = { version = "0.1.19" }
|
||||
actix = { version = "0.10.0", default-features = false }
|
||||
actix-rt = "1.1.1"
|
||||
chrono = "0.4.19"
|
||||
serde = { version = "1.0.116", features = ["derive"] }
|
||||
config = { version = "0.10.1", default-features = false, features = ["json"] }
|
||||
|
||||
[dependencies.tracing-subscriber]
|
||||
version = "0.2.11"
|
||||
default-features = false
|
||||
features = ["registry", "fmt", "env-filter", "ansi"]
|
@ -1,12 +0,0 @@
|
||||
{
|
||||
"mqtt": {
|
||||
"broker_host": "localhost",
|
||||
"broker_port": 1883,
|
||||
"client_id": "sprinklers_rs-0001",
|
||||
"device_id": "sprinklers_rs-0001"
|
||||
},
|
||||
"zone_interface": {
|
||||
"provider": "Mock",
|
||||
"num_zones": 6
|
||||
}
|
||||
}
|
@ -1,74 +0,0 @@
|
||||
#![warn(clippy::all)]
|
||||
#![warn(clippy::print_stdout)]
|
||||
|
||||
// mod option_future;
|
||||
mod settings;
|
||||
mod state_manager;
|
||||
mod zone_interface;
|
||||
|
||||
use sprinklers_actors as actors;
|
||||
use sprinklers_database as database;
|
||||
use sprinklers_mqtt as mqtt;
|
||||
|
||||
use eyre::{Result, WrapErr};
|
||||
use settings::Settings;
|
||||
use tracing::info;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> Result<()> {
|
||||
color_eyre::install()?;
|
||||
tracing_subscriber::fmt()
|
||||
.with_ansi(true)
|
||||
.with_env_filter(
|
||||
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
|
||||
)
|
||||
.init();
|
||||
|
||||
let settings: Settings = Settings::new().wrap_err("could not load settings")?;
|
||||
|
||||
info!("Starting sprinklers_rs...");
|
||||
|
||||
let db_conn = database::setup_db()?;
|
||||
|
||||
let zone_interface = settings.zone_interface.build()?;
|
||||
let mut zone_runner = actors::ZoneRunner::new(zone_interface);
|
||||
let mut program_runner = actors::ProgramRunner::new(zone_runner.clone());
|
||||
|
||||
let state_manager = crate::state_manager::StateManagerThread::start(db_conn);
|
||||
|
||||
let mqtt_options = settings.mqtt;
|
||||
// TODO: have ability to update zones / other data
|
||||
let request_context = mqtt::RequestContext {
|
||||
zones: state_manager.get_zones(),
|
||||
zone_runner: zone_runner.clone(),
|
||||
program_runner: program_runner.clone(),
|
||||
state_manager: state_manager.clone(),
|
||||
};
|
||||
let mqtt_interface = mqtt::MqttInterfaceTask::start(mqtt_options, request_context);
|
||||
|
||||
let mut update_listener = mqtt::UpdateListener::start(mqtt_interface.clone());
|
||||
update_listener.listen_zones(state_manager.get_zones());
|
||||
update_listener.listen_zone_events(zone_runner.subscribe().await?);
|
||||
update_listener.listen_zone_runner(zone_runner.get_state_recv());
|
||||
update_listener.listen_programs(state_manager.get_programs());
|
||||
update_listener.listen_program_events(program_runner.subscribe().await?);
|
||||
|
||||
// Only connect the program runner now, so the subscriptions above receive its events
|
||||
program_runner.listen_zones(state_manager.get_zones());
|
||||
program_runner.listen_programs(state_manager.get_programs());
|
||||
|
||||
info!("sprinklers_rs initialized");
|
||||
|
||||
tokio::signal::ctrl_c().await?;
|
||||
info!("Interrupt received, shutting down");
|
||||
|
||||
update_listener.quit().await?;
|
||||
mqtt_interface.quit().await?;
|
||||
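// Drop the state manager handle so its request channel can close and its thread can shut down.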
drop(state_manager);
|
||||
program_runner.quit().await?;
|
||||
zone_runner.quit().await?;
|
||||
actix::System::current().stop();
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,44 +0,0 @@
|
||||
use crate::zone_interface::ZoneInterfaceConfig;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::trace;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(remote = "sprinklers_mqtt::Options")]
|
||||
struct MqttOptions {
|
||||
pub broker_host: String,
|
||||
pub broker_port: u16,
|
||||
pub device_id: String,
|
||||
pub client_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct Settings {
|
||||
#[serde(with = "MqttOptions")]
|
||||
pub mqtt: sprinklers_mqtt::Options,
|
||||
#[serde(default)]
|
||||
pub zone_interface: ZoneInterfaceConfig,
|
||||
}
|
||||
|
||||
impl Settings {
|
||||
pub fn new() -> eyre::Result<Self> {
|
||||
let mut s = config::Config::new();
|
||||
|
||||
let default_config = config::File::from_str(
|
||||
include_str!("../sprinklers_rs.default.json"),
|
||||
config::FileFormat::Json,
|
||||
);
|
||||
s.merge(default_config)?;
|
||||
|
||||
// TODO: specify configuration path from arguments or env
|
||||
s.merge(config::File::with_name("sprinklers_rs").required(false))?;
|
||||
|
||||
s.merge(config::Environment::with_prefix("SPRINKLERS").separator("__"))?;
|
||||
|
||||
let settings: Settings = s.try_into()?;
|
||||
|
||||
trace!("settings: {:#?}", settings);
|
||||
|
||||
Ok(settings)
|
||||
}
|
||||
}
|
@ -1,120 +0,0 @@
|
||||
use sprinklers_actors::{state_manager, StateManager};
|
||||
use sprinklers_database::{self as database, DbConn};
|
||||
|
||||
use eyre::{eyre, WrapErr};
|
||||
use sprinklers_core::model::{ProgramRef, Programs, Zones};
|
||||
use tokio::{
|
||||
runtime,
|
||||
sync::{mpsc, watch},
|
||||
};
|
||||
use tracing::{trace, warn};
|
||||
|
||||
pub struct StateManagerThread {
|
||||
db_conn: DbConn,
|
||||
request_rx: mpsc::Receiver<state_manager::Request>,
|
||||
zones_tx: watch::Sender<Zones>,
|
||||
programs_tx: watch::Sender<Programs>,
|
||||
}
|
||||
|
||||
struct State {
|
||||
zones: Zones,
|
||||
programs: Programs,
|
||||
}
|
||||
|
||||
impl StateManagerThread {
|
||||
pub fn start(db_conn: DbConn) -> StateManager {
|
||||
let (request_tx, request_rx) = mpsc::channel(8);
|
||||
let (zones_tx, zones_rx) = watch::channel(Zones::default());
|
||||
let (programs_tx, programs_rx) = watch::channel(Programs::default());
|
||||
let task = StateManagerThread {
|
||||
db_conn,
|
||||
request_rx,
|
||||
zones_tx,
|
||||
programs_tx,
|
||||
};
|
||||
let runtime_handle = runtime::Handle::current();
|
||||
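// rusqlite is blocking, so the state manager gets its own OS thread and only uses the runtime handle to await incoming requests.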
std::thread::Builder::new()
|
||||
.name("sprinklers_rs::state_manager".into())
|
||||
.spawn(move || task.run(runtime_handle))
|
||||
.expect("could not start state_manager thread");
|
||||
StateManager::new(request_tx, zones_rx, programs_rx)
|
||||
}
|
||||
|
||||
fn broadcast_zones(&mut self, zones: Zones) {
|
||||
if let Err(err) = self.zones_tx.broadcast(zones) {
|
||||
warn!("could not broadcast zones: {}", err);
|
||||
}
|
||||
}
|
||||
|
||||
fn broadcast_programs(&mut self, programs: Programs) {
|
||||
if let Err(err) = self.programs_tx.broadcast(programs) {
|
||||
warn!("could not broadcast programs: {}", err);
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
request: state_manager::Request,
|
||||
state: &mut State,
|
||||
) -> eyre::Result<()> {
|
||||
use state_manager::Request;
|
||||
|
||||
match request {
|
||||
Request::UpdateProgram {
|
||||
id,
|
||||
update,
|
||||
resp_tx,
|
||||
} => {
|
||||
// HACK: would really like stable try notation
|
||||
let res = (|| -> state_manager::Result<ProgramRef> {
|
||||
let mut trans = self
|
||||
.db_conn
|
||||
.transaction()
|
||||
.wrap_err("failed to start transaction")?;
|
||||
database::update_program(&mut trans, id, &update).map_err(|err| {
|
||||
if let Some(e) = err.downcast_ref::<database::NoSuchProgram>() {
|
||||
state_manager::StateError::NoSuchProgram(e.0)
|
||||
} else {
|
||||
err.into()
|
||||
}
|
||||
})?;
|
||||
let new_program: ProgramRef = database::query_program_by_id(&trans, id)?.into();
|
||||
state.programs.insert(new_program.id, new_program.clone());
|
||||
trans.commit().wrap_err("could not commit transaction")?;
|
||||
self.broadcast_programs(state.programs.clone());
|
||||
Ok(new_program)
|
||||
})();
|
||||
resp_tx
|
||||
.send(res)
|
||||
.map_err(|_| eyre!("could not respond to UpdateProgram"))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn load_state(&mut self) -> eyre::Result<State> {
|
||||
let zones = database::query_zones(&self.db_conn)?;
|
||||
|
||||
for zone in zones.values() {
|
||||
trace!(zone = debug(&zone), "read zone");
|
||||
}
|
||||
|
||||
let programs =
|
||||
database::query_programs(&self.db_conn).wrap_err("could not query programs")?;
|
||||
|
||||
Ok(State { zones, programs })
|
||||
}
|
||||
|
||||
fn run(mut self, runtime_handle: runtime::Handle) {
|
||||
let mut state = self.load_state().expect("could not load initial state");
|
||||
|
||||
self.broadcast_zones(state.zones.clone());
|
||||
self.broadcast_programs(state.programs.clone());
|
||||
|
||||
while let Some(request) = runtime_handle.block_on(self.request_rx.recv()) {
|
||||
if let Err(err) = self.handle_request(request, &mut state) {
|
||||
warn!("error handling request: {:?}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,33 +0,0 @@
|
||||
use sprinklers_core::zone_interface::{MockZoneInterface, ZoneInterface, ZoneNum};
|
||||
|
||||
#[cfg(feature = "sprinklers_linux")]
|
||||
use sprinklers_linux::LinuxGpioConfig;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(tag = "provider")]
|
||||
pub enum ZoneInterfaceConfig {
|
||||
Mock {
|
||||
num_zones: ZoneNum,
|
||||
},
|
||||
#[cfg(feature = "sprinklers_linux")]
|
||||
LinuxGpio(LinuxGpioConfig),
|
||||
}
|
||||
|
||||
impl Default for ZoneInterfaceConfig {
|
||||
fn default() -> Self {
|
||||
ZoneInterfaceConfig::Mock { num_zones: 6 }
|
||||
}
|
||||
}
|
||||
|
||||
impl ZoneInterfaceConfig {
|
||||
pub fn build(self) -> eyre::Result<Arc<dyn ZoneInterface>> {
|
||||
Ok(match self {
|
||||
ZoneInterfaceConfig::Mock { num_zones } => Arc::new(MockZoneInterface::new(num_zones)),
|
||||
#[cfg(feature = "sprinklers_linux")]
|
||||
ZoneInterfaceConfig::LinuxGpio(config) => Arc::new(config.build()?),
|
||||
})
|
||||
}
|
||||
}
|
27
src/db.rs
Normal file
@ -0,0 +1,27 @@
|
||||
use crate::migrations::{Migrations, SimpleMigration};
|
||||
|
||||
pub fn create_migrations() -> Migrations {
|
||||
let mut migs = Migrations::new();
|
||||
migs.add(SimpleMigration::new_box(
|
||||
1,
|
||||
"CREATE TABLE sections (
|
||||
id INTEGER PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
interface_id INTEGER NOT NULL
|
||||
);",
|
||||
"DROP TABLE sections;",
|
||||
));
|
||||
migs.add(SimpleMigration::new_box(
|
||||
2,
|
||||
"INSERT INTO sections (id, name, interface_id)
|
||||
VALUES
|
||||
(1, 'Front Yard Middle', 0),
|
||||
(2, 'Front Yard Left', 1),
|
||||
(3, 'Front Yard Right', 2),
|
||||
(4, 'Back Yard Middle', 3),
|
||||
(5, 'Back Yard Sauna', 4),
|
||||
(6, 'Garden', 5);",
|
||||
"DELETE FROM sections;",
|
||||
));
|
||||
migs
|
||||
}
|
51
src/main.rs
Normal file
@ -0,0 +1,51 @@
|
||||
use color_eyre::eyre::Result;
|
||||
use rusqlite::Connection as DbConnection;
|
||||
use rusqlite::NO_PARAMS;
|
||||
use tracing::info;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
mod db;
|
||||
mod migrations;
|
||||
mod model;
|
||||
mod option_future;
|
||||
mod section_interface;
|
||||
mod section_runner;
|
||||
#[cfg(test)]
|
||||
mod trace_listeners;
|
||||
|
||||
use model::Section;
|
||||
|
||||
fn setup_db() -> Result<DbConnection> {
|
||||
// let conn = DbConnection::open_in_memory()?;
|
||||
let mut conn = DbConnection::open("test.db")?;
|
||||
|
||||
let migs = db::create_migrations();
|
||||
migs.apply(&mut conn)?;
|
||||
|
||||
Ok(conn)
|
||||
}
|
||||
|
||||
fn query_sections(conn: &DbConnection) -> Result<Vec<Section>> {
|
||||
let mut statement = conn.prepare_cached("SELECT id, name, interface_id FROM sections;")?;
|
||||
let rows = statement.query_map(NO_PARAMS, Section::from_sql)?;
|
||||
Ok(rows.collect::<Result<Vec<_>, _>>()?)
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
tracing_subscriber::fmt()
|
||||
.with_ansi(true)
|
||||
.with_env_filter(
|
||||
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
|
||||
)
|
||||
.init();
|
||||
color_eyre::install()?;
|
||||
|
||||
let conn = setup_db()?;
|
||||
|
||||
let sections = query_sections(&conn)?;
|
||||
for sec in sections {
|
||||
info!(section = debug(&sec), "read section");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
@ -3,7 +3,7 @@ use rusqlite::{params, Connection};
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::Bound::{Excluded, Unbounded};
|
||||
use thiserror::Error;
|
||||
use tracing::{debug, info, trace};
|
||||
use tracing::{debug, trace, info};
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum MigrationError {
|
||||
@ -85,9 +85,7 @@ pub fn get_db_version(conn: &Connection) -> MigrationResult<MigrationVersion> {
|
||||
}
|
||||
|
||||
let version: u32 = conn.query_row(
|
||||
"SELECT v.version \
|
||||
FROM db_version AS v \
|
||||
WHERE id = 1",
|
||||
"SELECT version FROM db_version WHERE id = 1",
|
||||
NO_PARAMS,
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
@ -116,17 +114,11 @@ pub struct Migrations {
|
||||
migrations: BTreeMap<MigrationVersion, Box<dyn Migration>>,
|
||||
}
|
||||
|
||||
impl Default for Migrations {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
migrations: BTreeMap::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Migrations {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
Self {
|
||||
migrations: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add(&mut self, migration: Box<dyn Migration>) {
|
||||
@ -152,11 +144,7 @@ impl Migrations {
|
||||
last_ver = *ver;
|
||||
}
|
||||
if last_ver != NO_MIGRATIONS {
|
||||
info!(
|
||||
old_version = db_version,
|
||||
new_version = last_ver,
|
||||
"applied database migrations"
|
||||
);
|
||||
info!(old_version = db_version, new_version = last_ver, "applied database migrations");
|
||||
set_db_version(&trans, last_ver)?;
|
||||
}
|
||||
trans.commit()?;
|
3
src/model/mod.rs
Normal file
@ -0,0 +1,3 @@
|
||||
mod section;
|
||||
|
||||
pub use section::{Section, SectionRef};
|
27
src/model/section.rs
Normal file
@ -0,0 +1,27 @@
|
||||
use crate::section_interface::SectionId;
|
||||
use rusqlite::{Error as SqlError, Row as SqlRow, ToSql};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Section {
|
||||
pub id: u32,
|
||||
pub name: String,
|
||||
pub interface_id: SectionId,
|
||||
}
|
||||
|
||||
impl Section {
|
||||
pub fn from_sql<'a>(row: &SqlRow<'a>) -> Result<Section, SqlError> {
|
||||
Ok(Section {
|
||||
id: row.get(0)?,
|
||||
name: row.get(1)?,
|
||||
interface_id: row.get(2)?,
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn as_sql(&self) -> Vec<&dyn ToSql> {
|
||||
vec![&self.id, &self.name, &self.interface_id]
|
||||
}
|
||||
}
|
||||
|
||||
pub type SectionRef = Arc<Section>;
|
65
src/section_interface.rs
Normal file
@ -0,0 +1,65 @@
|
||||
use std::iter::repeat_with;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use tracing::debug;
|
||||
|
||||
pub type SectionId = u32;
|
||||
|
||||
pub trait SectionInterface: Send {
|
||||
fn num_sections(&self) -> SectionId;
|
||||
fn set_section_state(&self, id: SectionId, running: bool);
|
||||
fn get_section_state(&self, id: SectionId) -> bool;
|
||||
}
|
||||
|
||||
pub struct MockSectionInterface {
|
||||
states: Vec<AtomicBool>,
|
||||
}
|
||||
|
||||
impl MockSectionInterface {
|
||||
#[allow(dead_code)]
|
||||
pub fn new(num_sections: SectionId) -> Self {
|
||||
Self {
|
||||
states: repeat_with(|| AtomicBool::new(false))
|
||||
.take(num_sections as usize)
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SectionInterface for MockSectionInterface {
|
||||
fn num_sections(&self) -> SectionId {
|
||||
self.states.len() as SectionId
|
||||
}
|
||||
fn set_section_state(&self, id: SectionId, running: bool) {
|
||||
debug!(id, running, "setting section");
|
||||
self.states[id as usize].store(running, Ordering::SeqCst);
|
||||
}
|
||||
fn get_section_state(&self, id: SectionId) -> bool {
|
||||
self.states[id as usize].load(Ordering::SeqCst)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_mock_section_interface() {
|
||||
let iface = MockSectionInterface::new(6);
|
||||
assert_eq!(iface.num_sections(), 6);
|
||||
for i in 0..6u32 {
|
||||
assert_eq!(iface.get_section_state(i), false);
|
||||
}
|
||||
for i in 0..6u32 {
|
||||
iface.set_section_state(i, true);
|
||||
}
|
||||
for i in 0..6u32 {
|
||||
assert_eq!(iface.get_section_state(i), true);
|
||||
}
|
||||
for i in 0..6u32 {
|
||||
iface.set_section_state(i, false);
|
||||
}
|
||||
for i in 0..6u32 {
|
||||
assert_eq!(iface.get_section_state(i), false);
|
||||
}
|
||||
}
|
||||
}
|
620
src/section_runner.rs
Normal file
@ -0,0 +1,620 @@
|
||||
use crate::model::SectionRef;
|
||||
use crate::option_future::OptionFuture;
|
||||
use crate::section_interface::SectionInterface;
|
||||
use mpsc::error::SendError;
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
mem::swap,
|
||||
sync::{
|
||||
atomic::{AtomicI32, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use thiserror::Error;
|
||||
use tokio::{
|
||||
spawn,
|
||||
sync::mpsc,
|
||||
time::{delay_for, Instant},
|
||||
};
|
||||
use tracing::{debug, trace, trace_span, warn};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct RunHandle(i32);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct SectionRunnerInner {
|
||||
next_run_id: AtomicI32,
|
||||
}
|
||||
|
||||
impl SectionRunnerInner {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
next_run_id: AtomicI32::new(1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
enum RunnerMsg {
|
||||
Quit,
|
||||
QueueRun(RunHandle, SectionRef, Duration),
|
||||
CancelRun(RunHandle),
|
||||
CancelAll,
|
||||
Pause,
|
||||
Unpause,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
enum RunState {
|
||||
Waiting,
|
||||
Running {
|
||||
start_time: Instant,
|
||||
},
|
||||
Finished,
|
||||
Cancelled,
|
||||
Paused {
|
||||
start_time: Instant,
|
||||
pause_time: Instant,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct SecRun {
|
||||
handle: RunHandle,
|
||||
section: SectionRef,
|
||||
duration: Duration,
|
||||
total_duration: Duration,
|
||||
state: RunState,
|
||||
}
|
||||
|
||||
impl SecRun {
|
||||
fn new(handle: RunHandle, section: SectionRef, duration: Duration) -> Self {
|
||||
Self {
|
||||
handle,
|
||||
section,
|
||||
duration,
|
||||
total_duration: duration,
|
||||
state: RunState::Waiting,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_running(&self) -> bool {
|
||||
matches!(self.state, RunState::Running{..})
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn is_paused(&self) -> bool {
|
||||
matches!(self.state, RunState::Paused{..})
|
||||
}
|
||||
}
|
||||
|
||||
struct RunnerTask {
|
||||
interface: Arc<dyn SectionInterface + Sync>,
|
||||
msg_recv: mpsc::Receiver<RunnerMsg>,
|
||||
running: bool,
|
||||
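// Completion timer for the currently running section, if one is active.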
delay_future: OptionFuture<tokio::time::Delay>,
|
||||
paused: bool,
|
||||
}
|
||||
|
||||
impl RunnerTask {
|
||||
fn new(
|
||||
interface: Arc<dyn SectionInterface + Sync>,
|
||||
msg_recv: mpsc::Receiver<RunnerMsg>,
|
||||
) -> Self {
|
||||
Self {
|
||||
interface,
|
||||
msg_recv,
|
||||
running: true,
|
||||
delay_future: None.into(),
|
||||
paused: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn start_run(&mut self, run: &mut SecRun) {
|
||||
use RunState::*;
|
||||
debug!(section_id = run.section.id, "starting running section");
|
||||
self.interface
|
||||
.set_section_state(run.section.interface_id, true);
|
||||
run.state = Running {
|
||||
start_time: Instant::now(),
|
||||
};
|
||||
}
|
||||
|
||||
fn finish_run(&mut self, run: &mut SecRun) {
|
||||
if run.is_running() {
|
||||
debug!(section_id = run.section.id, "finished running section");
|
||||
self.interface
|
||||
.set_section_state(run.section.interface_id, false);
|
||||
run.state = RunState::Finished;
|
||||
} else {
|
||||
warn!(
|
||||
section_id = run.section.id,
|
||||
state = debug(&run.state),
|
||||
"cannot finish run which is not running"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn cancel_run(&mut self, run: &mut SecRun) {
|
||||
if run.is_running() {
|
||||
debug!(section_id = run.section.id, "cancelling running section");
|
||||
self.interface
|
||||
.set_section_state(run.section.interface_id, false);
|
||||
}
|
||||
run.state = RunState::Cancelled;
|
||||
}
|
||||
|
||||
fn pause_run(&mut self, run: &mut SecRun) {
|
||||
use RunState::*;
|
||||
match run.state {
|
||||
Running { start_time } => {
|
||||
debug!(section_id = run.section.id, "pausing running section");
|
||||
self.interface
|
||||
.set_section_state(run.section.interface_id, false);
|
||||
run.state = Paused {
|
||||
start_time,
|
||||
pause_time: Instant::now(),
|
||||
};
|
||||
}
|
||||
Waiting => {
|
||||
debug!(section_id = run.section.id, "pausing waiting section");
|
||||
run.state = Paused {
|
||||
start_time: Instant::now(),
|
||||
pause_time: Instant::now(),
|
||||
};
|
||||
}
|
||||
Finished | Cancelled | Paused { .. } => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn unpause_run(&mut self, run: &mut SecRun) {
|
||||
use RunState::*;
|
||||
match run.state {
|
||||
Paused {
|
||||
start_time,
|
||||
pause_time,
|
||||
} => {
|
||||
debug!(section_id = run.section.id, "unpausing section");
|
||||
self.interface
|
||||
.set_section_state(run.section.interface_id, true);
|
||||
run.state = Running {
|
||||
start_time: Instant::now(),
|
||||
};
|
||||
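// Subtract the time already run before the pause so only the remainder is re-run.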
let ran_for = pause_time - start_time;
|
||||
run.duration -= ran_for;
|
||||
}
|
||||
Waiting | Finished | Cancelled | Running { .. } => {
|
||||
warn!(
|
||||
section_id = run.section.id,
|
||||
state = debug(&run.state),
|
||||
"can only unpause paused section"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_queue(&mut self, run_queue: &mut VecDeque<SecRun>) {
|
||||
use RunState::*;
|
||||
loop {
|
||||
let current_run = match run_queue.front_mut() {
|
||||
Some(current_run) => current_run,
|
||||
None => break,
|
||||
};
|
||||
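// Advance the front run through its state machine; it is popped once finished or cancelled.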
let run_finished = match (&current_run.state, self.paused) {
|
||||
(Waiting, false) => {
|
||||
self.start_run(current_run);
|
||||
self.delay_future = Some(delay_for(current_run.duration)).into();
|
||||
false
|
||||
}
|
||||
(Running { start_time }, false) => {
|
||||
let time_to_finish = start_time.elapsed() >= current_run.duration;
|
||||
if time_to_finish {
|
||||
self.finish_run(current_run);
|
||||
self.delay_future = None.into();
|
||||
}
|
||||
time_to_finish
|
||||
}
|
||||
(Paused { .. }, false) => {
|
||||
self.unpause_run(current_run);
|
||||
self.delay_future = Some(delay_for(current_run.duration)).into();
|
||||
false
|
||||
}
|
||||
(Waiting, true) => {
|
||||
self.pause_run(current_run);
|
||||
false
|
||||
}
|
||||
(Running { .. }, true) => {
|
||||
self.pause_run(current_run);
|
||||
self.delay_future = None.into();
|
||||
false
|
||||
}
|
||||
(Paused { .. }, true) => false,
|
||||
(Cancelled, _) | (Finished, _) => true,
|
||||
};
|
||||
|
||||
if run_finished {
|
||||
run_queue.pop_front();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_msg(&mut self, msg: Option<RunnerMsg>, run_queue: &mut VecDeque<SecRun>) {
|
||||
let msg = msg.expect("SectionRunner channel closed");
|
||||
use RunnerMsg::*;
|
||||
trace!(msg = debug(&msg), "runner_task recv");
|
||||
match msg {
|
||||
Quit => self.running = false,
|
||||
QueueRun(handle, section, duration) => {
|
||||
run_queue.push_back(SecRun::new(handle, section, duration));
|
||||
}
|
||||
CancelRun(handle) => {
|
||||
for run in run_queue {
|
||||
if run.handle != handle {
|
||||
continue;
|
||||
}
|
||||
trace!(handle = handle.0, "cancelling run by handle");
|
||||
self.cancel_run(run);
|
||||
}
|
||||
}
|
||||
CancelAll => {
|
||||
let mut old_runs = VecDeque::new();
|
||||
swap(&mut old_runs, run_queue);
|
||||
trace!(count = old_runs.len(), "cancelling all runs");
|
||||
for mut run in old_runs {
|
||||
self.cancel_run(&mut run);
|
||||
}
|
||||
}
|
||||
Pause => {
|
||||
self.paused = true;
|
||||
}
|
||||
Unpause => {
|
||||
self.paused = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn start(mut self) {
|
||||
let span = trace_span!("runner_task");
|
||||
let _enter = span.enter();
|
||||
|
||||
let mut run_queue: VecDeque<SecRun> = VecDeque::new();
|
||||
|
||||
while self.running {
|
||||
self.process_queue(&mut run_queue);
|
||||
let delay_done = || {
|
||||
trace!("delay done");
|
||||
};
|
||||
tokio::select! {
|
||||
msg = self.msg_recv.recv() => self.handle_msg(msg, &mut run_queue),
|
||||
_ = &mut self.delay_future, if self.delay_future.is_some() => delay_done()
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Error)]
|
||||
#[error("the SectionRunner channel is closed")]
|
||||
pub struct ChannelClosed;
|
||||
|
||||
pub type Result<T, E = ChannelClosed> = std::result::Result<T, E>;
|
||||
|
||||
impl<T> From<SendError<T>> for ChannelClosed {
|
||||
fn from(_: SendError<T>) -> Self {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SectionRunner {
|
||||
inner: Arc<SectionRunnerInner>,
|
||||
msg_send: mpsc::Sender<RunnerMsg>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl SectionRunner {
|
||||
pub fn new(interface: Arc<dyn SectionInterface + Sync>) -> Self {
|
||||
let (msg_send, msg_recv) = mpsc::channel(8);
|
||||
spawn(RunnerTask::new(interface, msg_recv).start());
|
||||
Self {
|
||||
inner: Arc::new(SectionRunnerInner::new()),
|
||||
msg_send,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn quit(&mut self) -> Result<()> {
|
||||
self.msg_send.send(RunnerMsg::Quit).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn queue_run(
|
||||
&mut self,
|
||||
section: SectionRef,
|
||||
duration: Duration,
|
||||
) -> Result<RunHandle> {
|
||||
let run_id = self.inner.next_run_id.fetch_add(1, Ordering::Relaxed);
|
||||
let handle = RunHandle(run_id);
|
||||
self.msg_send
|
||||
.send(RunnerMsg::QueueRun(handle.clone(), section, duration))
|
||||
.await?;
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
pub async fn cancel_run(&mut self, handle: RunHandle) -> Result<()> {
|
||||
self.msg_send.send(RunnerMsg::CancelRun(handle)).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn cancel_all(&mut self) -> Result<()> {
|
||||
self.msg_send.send(RunnerMsg::CancelAll).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn pause(&mut self) -> Result<()> {
|
||||
self.msg_send.send(RunnerMsg::Pause).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn unpause(&mut self) -> Result<()> {
|
||||
self.msg_send.send(RunnerMsg::Unpause).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::section_interface::MockSectionInterface;
|
||||
use crate::{
|
||||
model::Section,
|
||||
trace_listeners::{EventListener, Filters, SpanFilters, SpanListener},
|
||||
};
|
||||
use tracing_subscriber::prelude::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_quit() {
|
||||
let quit_msg = EventListener::new(
|
||||
Filters::new()
|
||||
.target("sprinklers_rs::section_runner")
|
||||
.message("runner_task recv")
|
||||
.field_value("msg", "Quit"),
|
||||
);
|
||||
let task_span = SpanListener::new(
|
||||
SpanFilters::new()
|
||||
.target("sprinklers_rs::section_runner")
|
||||
.name("runner_task"),
|
||||
);
|
||||
let subscriber = tracing_subscriber::registry()
|
||||
.with(quit_msg.clone())
|
||||
.with(task_span.clone());
|
||||
let _sub = tracing::subscriber::set_default(subscriber);
|
||||
|
||||
let interface = MockSectionInterface::new(6);
|
||||
let mut runner = SectionRunner::new(Arc::new(interface));
|
||||
tokio::task::yield_now().await;
|
||||
runner.quit().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_eq!(quit_msg.get_count(), 1);
|
||||
assert_eq!(task_span.get_exit_count(), 1);
|
||||
}
|
||||
|
||||
fn make_sections_and_interface() -> (Vec<SectionRef>, Arc<MockSectionInterface>) {
|
||||
let interface = Arc::new(MockSectionInterface::new(2));
|
||||
let sections: Vec<SectionRef> = vec![
|
||||
Arc::new(Section {
|
||||
id: 1,
|
||||
name: "Section 1".into(),
|
||||
interface_id: 0,
|
||||
}),
|
||||
Arc::new(Section {
|
||||
id: 2,
|
||||
name: "Section 2".into(),
|
||||
interface_id: 1,
|
||||
}),
|
||||
];
|
||||
(sections, interface)
|
||||
}
|
||||
|
||||
fn assert_section_states(interface: &MockSectionInterface, states: &[bool]) {
|
||||
for (id, state) in states.iter().enumerate() {
|
||||
assert_eq!(
|
||||
interface.get_section_state(id as u32),
|
||||
*state,
|
||||
"section interface id {} did not match",
|
||||
id
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn advance(dur: Duration) {
|
||||
// HACK: advance should really be enough, but we need another yield_now
|
||||
tokio::time::pause();
|
||||
tokio::time::advance(dur).await;
|
||||
tokio::task::yield_now().await;
|
||||
tokio::time::resume();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_queue() {
|
||||
let (sections, interface) = make_sections_and_interface();
|
||||
let mut runner = SectionRunner::new(interface.clone());
|
||||
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
// Queue single section, make sure it runs
|
||||
runner
|
||||
.queue_run(sections[0].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_section_states(&interface, &[true, false]);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
// Queue two sections, make sure they run one at a time
|
||||
runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
runner
|
||||
.queue_run(sections[0].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
|
||||
assert_section_states(&interface, &[true, false]);
|
||||
|
||||
advance(Duration::from_secs(10)).await;
|
||||
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cancel_run() {
|
||||
let (sections, interface) = make_sections_and_interface();
|
||||
let mut runner = SectionRunner::new(interface.clone());
|
||||
|
||||
let run1 = runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let _run2 = runner
|
||||
.queue_run(sections[0].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let run3 = runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
runner.cancel_run(run1).await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
assert_section_states(&interface, &[true, false]);
|
||||
|
||||
runner.cancel_run(run3).await.unwrap();
|
||||
advance(Duration::from_secs(11)).await;
|
||||
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cancel_all() {
|
||||
let (sections, interface) = make_sections_and_interface();
|
||||
let mut runner = SectionRunner::new(interface.clone());
|
||||
|
||||
runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
runner
|
||||
.queue_run(sections[0].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
runner.cancel_all().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.cancel_all().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_pause() {
|
||||
let (sections, interface) = make_sections_and_interface();
|
||||
let mut runner = SectionRunner::new(interface.clone());
|
||||
|
||||
let _run1 = runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let run2 = runner
|
||||
.queue_run(sections[0].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let _run3 = runner
|
||||
.queue_run(sections[1].clone(), Duration::from_secs(10))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
runner.pause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
advance(Duration::from_secs(10)).await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.unpause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(8)).await;
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(2)).await;
|
||||
assert_section_states(&interface, &[true, false]);
|
||||
|
||||
runner.pause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
// cancel paused run
|
||||
runner.cancel_run(run2).await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.unpause().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
assert_section_states(&interface, &[false, true]);
|
||||
|
||||
advance(Duration::from_secs(11)).await;
|
||||
assert_section_states(&interface, &[false, false]);
|
||||
|
||||
runner.quit().await.unwrap();
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
}
|
@ -117,7 +117,7 @@ impl<'a> Visit for TraceListenerVisitor<'a> {
|
||||
{
|
||||
if field.name() == filter_field.name {
|
||||
if let Some(filter_field_value) = &filter_field.value {
|
||||
*right = value_str.starts_with(filter_field_value);
|
||||
*right = &value_str == filter_field_value;
|
||||
} else {
|
||||
*right = true;
|
||||
}
|