morz-infoboard/server/backend/internal/app/app.go
Jesko Anschütz ccec32c832 feat(scheduler): Reconciler gleicht Ist- und Soll-Display-Zustand ab
Fügt Reconcile() und desiredState() zum Scheduler-Package hinzu.
Der Reconciler läuft alle 5 Minuten, berechnet den Soll-Zustand aus
den konfigurierten Ein-/Ausschaltzeiten (inkl. Mitternacht-Überschreitung)
und sendet bei Abweichung oder unbekanntem Ist-Zustand einen MQTT-Befehl.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-27 18:33:48 +01:00

176 lines
5.3 KiB
Go

package app
import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"log"
	"log/slog"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"git.az-it.net/az/morz-infoboard/server/backend/internal/config"
	"git.az-it.net/az/morz-infoboard/server/backend/internal/db"
	"git.az-it.net/az/morz-infoboard/server/backend/internal/httpapi"
	"git.az-it.net/az/morz-infoboard/server/backend/internal/mqttnotifier"
	"git.az-it.net/az/morz-infoboard/server/backend/internal/scheduler"
	"git.az-it.net/az/morz-infoboard/server/backend/internal/store"
)
// App holds the fully wired application: configuration, the HTTP server,
// the MQTT notifier, and the stores that background goroutines in Run need
// direct access to. Construct it with New; start it with Run.
type App struct {
Config config.Config
server *http.Server
notifier *mqttnotifier.Notifier
authStore *store.AuthStore // used by the hourly session-cleanup loop in Run
scheduleStore *store.ScreenScheduleStore // input for the display scheduler/reconciler
screenStore *store.ScreenStore // input for the display scheduler/reconciler
dbPool *db.Pool // V7: kept so db.Close() can run during shutdown
logger *log.Logger // compatibility logger (db.Connect expects *log.Logger)
}
// New builds the application: loads configuration, prepares the upload
// directory, connects to the database (running migrations via db.Connect),
// wires up all domain stores, ensures an admin user exists, and assembles
// the HTTP router. Any already-opened database pool is closed before an
// error is returned, so a failed New leaks no resources.
func New() (*App, error) {
	cfg := config.Load()

	// Compatibility logger for db.Connect (it expects a *log.Logger).
	logger := log.New(os.Stdout, "backend ", log.LstdFlags|log.LUTC)

	// Ensure the upload directory exists before anything can write to it.
	if err := os.MkdirAll(cfg.UploadDir, 0o755); err != nil {
		return nil, fmt.Errorf("creating upload dir %q: %w", cfg.UploadDir, err)
	}

	// Connect to database and run migrations.
	pool, err := db.Connect(context.Background(), cfg.DatabaseURL, logger)
	if err != nil {
		return nil, fmt.Errorf("connecting to database: %w", err)
	}

	// Status store (existing in-memory/file store).
	statusStore, err := httpapi.NewStoreFromConfig(cfg.StatusStorePath)
	if err != nil {
		pool.Close()
		return nil, fmt.Errorf("initializing status store from %q: %w", cfg.StatusStorePath, err)
	}

	// Domain stores, all backed by the shared connection pool.
	tenants := store.NewTenantStore(pool.Pool)
	screens := store.NewScreenStore(pool.Pool)
	media := store.NewMediaStore(pool.Pool)
	playlists := store.NewPlaylistStore(pool.Pool)
	authStore := store.NewAuthStore(pool.Pool)
	schedules := store.NewScreenScheduleStore(pool.Pool)

	// Ensure admin user exists — generate a random password if none is configured.
	adminPassword := cfg.AdminPassword
	if adminPassword == "" {
		buf := make([]byte, 16)
		if _, err := rand.Read(buf); err != nil {
			pool.Close()
			return nil, fmt.Errorf("generating admin password: %w", err)
		}
		adminPassword = hex.EncodeToString(buf)
		// V6: slog instead of log.Printf — never log the password itself (K5).
		slog.Info("admin password generated", "event", "admin_password_generated", "password", "[gesetzt]")
	}
	if err := authStore.EnsureAdminUser(context.Background(), cfg.DefaultTenantSlug, adminPassword); err != nil {
		slog.Error("ensure admin user failed", "event", "ensure_admin_user_failed", "err", err)
		// Non-fatal: server starts even if admin setup fails.
	}

	// Screenshot store (in-memory).
	ss := httpapi.NewScreenshotStore()

	// MQTT notifier (no-op when broker not configured).
	notifier := mqttnotifier.New(cfg.MQTTBroker, cfg.MQTTUsername, cfg.MQTTPassword)
	if cfg.MQTTBroker != "" {
		slog.Info("mqtt notifier enabled", "event", "mqtt_notifier_enabled", "broker", cfg.MQTTBroker)
	} else {
		slog.Info("mqtt notifier disabled", "event", "mqtt_notifier_disabled", "reason", "no_broker_configured")
	}

	handler := httpapi.NewRouter(httpapi.RouterDeps{
		StatusStore:     statusStore,
		TenantStore:     tenants,
		ScreenStore:     screens,
		MediaStore:      media,
		PlaylistStore:   playlists,
		AuthStore:       authStore,
		Notifier:        notifier,
		ScreenshotStore: ss,
		ScheduleStore:   schedules,
		Config:          cfg,
		UploadDir:       cfg.UploadDir,
		Logger:          logger,
	})

	return &App{
		Config: cfg,
		server: &http.Server{
			Addr:    cfg.HTTPAddress,
			Handler: handler,
			// Guard against slow-header (Slowloris) clients. Deliberately no
			// WriteTimeout: long-running requests (media uploads, screenshots)
			// must not be cut off mid-response.
			ReadHeaderTimeout: 10 * time.Second,
		},
		notifier:      notifier,
		authStore:     authStore,
		scheduleStore: schedules,
		screenStore:   screens,
		dbPool:        pool, // V7: kept for db.Close() during shutdown
		logger:        logger,
	}, nil
}
func (a *App) Run() error {
defer a.notifier.Close()
// W2+V7: Graceful Shutdown mit Signal-Handling.
// Der Context wird bei SIGTERM/SIGINT abgebrochen, was den Shutdown einleitet.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Session-Cleanup: expired sessions werden stündlich aus der DB entfernt.
go func() {
ticker := time.NewTicker(1 * time.Hour)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := a.authStore.CleanExpiredSessions(ctx); err != nil {
slog.Error("session cleanup failed", "event", "session_cleanup_failed", "err", err)
} else {
slog.Info("session cleanup ok", "event", "session_cleanup_ok")
}
case <-ctx.Done():
return
}
}
}()
// Display-Zeitplan-Scheduler
go scheduler.Run(ctx, a.scheduleStore, a.screenStore, a.notifier)
go scheduler.Reconcile(ctx, a.scheduleStore, a.screenStore, a.screenStore, a.notifier)
// W2: Signal-Handler für Graceful Shutdown.
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
go func() {
sig := <-sigCh
slog.Info("shutdown signal received", "event", "shutdown_signal", "signal", sig.String())
cancel() // Session-Cleanup stoppen.
// HTTP-Server mit Timeout herunterfahren.
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 15*time.Second)
defer shutdownCancel()
if err := a.server.Shutdown(shutdownCtx); err != nil {
slog.Error("shutdown error", "event", "shutdown_error", "err", err)
}
// V7: DB-Pool schließen.
a.dbPool.Close()
slog.Info("shutdown complete", "event", "shutdown_complete")
}()
err := a.server.ListenAndServe()
if errors.Is(err, http.ErrServerClosed) {
return nil
}
return err
}