author    | Alan Pearce | 2024-05-24 18:31:56 +0200
committer | Alan Pearce | 2024-05-24 18:31:56 +0200
commit    | e9eed3ddc4229db707cccb30beddde15044eff16 (patch)
tree      | c586eae45a4aa99fd1a971c2bd29ad2e74d14975 /searchix.go
parent    | 2c1491de56d0c3e2f4cb0b0c1e33035510f72fc5 (diff)
download  | searchix-e9eed3ddc4229db707cccb30beddde15044eff16.tar.lz  searchix-e9eed3ddc4229db707cccb30beddde15044eff16.tar.zst  searchix-e9eed3ddc4229db707cccb30beddde15044eff16.zip
refactor: split server cmd and module
It should now be possible to run the server from inside another Go application by importing the main module and calling its Start() function, as sketched below.
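A minimal sketch of what that embedding could look like, based only on the API this commit introduces (New, SetupIndex, Start). The import paths "searchix" and "searchix/internal/config" are placeholders, not confirmed by this diff: substitute whatever module path the repository actually declares. config.GetConfig appears in the removed main() and is assumed to remain available.

```go
package main

import (
	"context"
	"log"

	"searchix"                 // placeholder: the module path this repo declares
	"searchix/internal/config" // placeholder: wherever config.GetConfig lives
)

func main() {
	// Parse the TOML configuration, as the old main() did via config.GetConfig.
	cfg, err := config.GetConfig("config.toml")
	if err != nil {
		log.Fatalf("error parsing configuration file: %v", err)
	}

	// New sets up logging and Sentry and returns the embeddable server.
	s, err := searchix.New(cfg)
	if err != nil {
		log.Fatalf("error initialising searchix: %v", err)
	}

	// Open or build the index before serving (update=false, replace=false).
	if err := s.SetupIndex(false, false); err != nil {
		log.Fatalf("error setting up index: %v", err)
	}

	// Cancelling the context stops the background update timer.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start blocks until the underlying HTTP server stops.
	if err := s.Start(ctx, false); err != nil {
		log.Fatalf("error starting server: %v", err)
	}
}
```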
Diffstat (limited to 'searchix.go')
-rw-r--r-- | searchix.go | 281
1 file changed, 131 insertions(+), 150 deletions(-)
```diff
diff --git a/searchix.go b/searchix.go
index cf1a429..c10eb6a 100644
--- a/searchix.go
+++ b/searchix.go
@@ -1,12 +1,9 @@
-package main
+package searchix
 
 import (
-	"flag"
-	"fmt"
+	"context"
 	"log"
 	"log/slog"
-	"os"
-	"os/signal"
 	"slices"
 	"sync"
 	"time"
@@ -18,21 +15,7 @@ import (
 
 	"github.com/getsentry/sentry-go"
 	"github.com/pelletier/go-toml/v2"
-)
-
-var buildVersion string
-
-var (
-	configFile         = flag.String("config", "config.toml", "config `file` to use")
-	printDefaultConfig = flag.Bool(
-		"print-default-config",
-		false,
-		"print default configuration and exit",
-	)
-	liveReload = flag.Bool("live", false, "whether to enable live reloading (development)")
-	replace    = flag.Bool("replace", false, "replace existing index and exit")
-	update     = flag.Bool("update", false, "update index and exit")
-	version    = flag.Bool("version", false, "print version information")
+	"github.com/pkg/errors"
 )
 
 func nextOccurrenceOfLocalTime(t toml.LocalTime) time.Time {
@@ -55,84 +38,43 @@ func nextOccurrenceOfLocalTime(t toml.LocalTime) time.Time {
 	return nextRun
 }
 
-func main() {
-	flag.Parse()
-	if *version {
-		fmt.Fprintf(os.Stderr, "searchix %s", buildVersion)
-		if buildVersion != config.CommitSHA && buildVersion != config.ShortSHA {
-			fmt.Fprintf(os.Stderr, " %s", config.CommitSHA)
-		}
-		_, err := fmt.Fprint(os.Stderr, "\n")
-		if err != nil {
-			panic("can't write to standard error?!")
-		}
-		os.Exit(0)
-	}
-	if *printDefaultConfig {
-		_, err := fmt.Print(config.GetDefaultConfig())
-		if err != nil {
-			panic("can't write to standard output?!")
-		}
-		os.Exit(0)
-	}
-
-	cfg, err := config.GetConfig(*configFile)
-	if err != nil {
-		// only use log functions after the config file has been read successfully
-		fmt.Fprintf(os.Stderr, "error parsing configuration file: %v", err)
-		os.Exit(1)
-	}
-	slog.SetLogLoggerLevel(cfg.LogLevel)
-	if cfg.Web.Environment == "production" {
-		log.SetFlags(0)
-	} else {
-		log.SetFlags(log.LstdFlags)
-	}
-
-	err = sentry.Init(sentry.ClientOptions{
-		EnableTracing:    true,
-		TracesSampleRate: 1.0,
-		Dsn:              cfg.Web.SentryDSN,
-		Environment:      cfg.Web.Environment,
-	})
-	if err != nil {
-		slog.Warn("could not initialise sentry", "error", err)
-	}
-
+func (s *Server) SetupIndex(update bool, replace bool) error {
 	var i uint
-	cfgEnabledSources := make([]string, len(cfg.Importer.Sources))
-	for key := range cfg.Importer.Sources {
+	cfgEnabledSources := make([]string, len(s.cfg.Importer.Sources))
+	for key := range s.cfg.Importer.Sources {
 		cfgEnabledSources[i] = key
 		i++
 	}
 	slices.Sort(cfgEnabledSources)
 
-	read, write, exists, err := index.OpenOrCreate(cfg.DataPath, *replace)
+	read, write, exists, err := index.OpenOrCreate(s.cfg.DataPath, replace)
 	if err != nil {
-		log.Fatalf("Failed to open or create index: %v", err)
+		return errors.Wrap(err, "Failed to open or create index")
 	}
+	s.readIndex = read
+	s.writeIndex = write
 
-	if !exists || *replace || *update {
+	if !exists || replace || update {
 		slog.Info(
 			"Starting build job",
 			"new", !exists,
 			"replace",
-			*replace,
+			replace,
 			"update",
-			*update,
+			update,
 		)
-		err = importer.Start(cfg, write, *replace, nil)
+		err = importer.Start(s.cfg, write, replace, nil)
 		if err != nil {
-			log.Fatalf("Failed to build index: %v", err)
+			return errors.Wrap(err, "Failed to build index")
 		}
-		if *replace || *update {
-			return
+		if replace || update {
+			return nil
 		}
 	} else {
 		indexedSources, err := read.GetEnabledSources()
 		if err != nil {
-			log.Fatalln("failed to get enabled sources from index")
+			return errors.Wrap(err, "Failed to get enabled sources from index")
 		}
 		slices.Sort(indexedSources)
 		if !slices.Equal(cfgEnabledSources, indexedSources) {
@@ -144,9 +86,9 @@ func main() {
 			})
 			if len(newSources) > 0 {
 				slog.Info("adding new sources", "sources", newSources)
-				err := importer.Start(cfg, write, false, &newSources)
+				err := importer.Start(s.cfg, write, false, &newSources)
 				if err != nil {
-					log.Fatalf("failed to update index with new sources: %v", err)
+					return errors.Wrap(err, "Failed to update index with new sources")
 				}
 			}
 			if len(retiredSources) > 0 {
@@ -154,94 +96,133 @@ func main() {
 				for _, s := range retiredSources {
 					err := write.DeleteBySource(s)
 					if err != nil {
-						log.Fatalf("failed to remove retired source %s from index: %v", s, err)
+						return errors.Wrapf(err, "Failed to remove retired source %s", s)
 					}
 				}
 			}
 		}
 	}
 
-	c := make(chan os.Signal, 2)
-	signal.Notify(c, os.Interrupt)
-	sv, err := server.New(cfg, read, *liveReload)
+	return nil
+}
+
+type Server struct {
+	sv         *server.Server
+	wg         *sync.WaitGroup
+	cfg        *config.Config
+	sentryHub  *sentry.Hub
+	readIndex  *index.ReadIndex
+	writeIndex *index.WriteIndex
+}
+
+func New(cfg *config.Config) (*Server, error) {
+	slog.SetLogLoggerLevel(cfg.LogLevel)
+	if cfg.Web.Environment == "production" {
+		log.SetFlags(0)
+	} else {
+		log.SetFlags(log.LstdFlags)
+	}
+
+	err := sentry.Init(sentry.ClientOptions{
+		EnableTracing:    true,
+		TracesSampleRate: 1.0,
+		Dsn:              cfg.Web.SentryDSN,
+		Environment:      cfg.Web.Environment,
+	})
 	if err != nil {
-		log.Fatalf("error setting up server: %v", err)
+		slog.Warn("could not initialise sentry", "error", err)
 	}
 
-	wg := &sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		sig := <-c
-		log.Printf("signal captured: %v", sig)
-		<-sv.Stop()
-		slog.Debug("server stopped")
-	}()
-
-	go func(localHub *sentry.Hub) {
-		const monitorSlug = "import"
-		localHub.WithScope(func(scope *sentry.Scope) {
-			scope.SetContext("monitor", sentry.Context{"slug": monitorSlug})
-			monitorConfig := &sentry.MonitorConfig{
-				Schedule: sentry.IntervalSchedule(1, sentry.MonitorScheduleUnitDay),
-				// minutes
-				MaxRuntime:    10,
-				CheckInMargin: 5,
-				Timezone:      time.Local.String(),
-			}
-			nextRun := nextOccurrenceOfLocalTime(cfg.Importer.UpdateAt.LocalTime)
-			for {
-				slog.Debug("scheduling next run", "next-run", nextRun)
-				<-time.After(time.Until(nextRun))
-				wg.Add(1)
-				slog.Info("updating index")
+	return &Server{
+		cfg:       cfg,
+		sentryHub: sentry.CurrentHub(),
+	}, nil
+}
 
-				eventID := localHub.CaptureCheckIn(&sentry.CheckIn{
+func (s *Server) startUpdateTimer(
+	ctx context.Context,
+	localHub *sentry.Hub,
+) {
+	const monitorSlug = "import"
+	localHub.WithScope(func(scope *sentry.Scope) {
+		var err error
+		scope.SetContext("monitor", sentry.Context{"slug": monitorSlug})
+		monitorConfig := &sentry.MonitorConfig{
+			Schedule: sentry.IntervalSchedule(1, sentry.MonitorScheduleUnitDay),
+			// minutes
+			MaxRuntime:    10,
+			CheckInMargin: 5,
+			Timezone:      time.Local.String(),
+		}
+
+		s.wg.Add(1)
+		nextRun := nextOccurrenceOfLocalTime(s.cfg.Importer.UpdateAt.LocalTime)
+		for {
+			slog.Debug("scheduling next run", "next-run", nextRun)
+			select {
+			case <-ctx.Done():
+				slog.Debug("stopping scheduler")
+				s.wg.Done()
+
+				return
+			case <-time.After(time.Until(nextRun)):
+			}
+			s.wg.Add(1)
+			slog.Info("updating index")
+
+			eventID := localHub.CaptureCheckIn(&sentry.CheckIn{
+				MonitorSlug: monitorSlug,
+				Status:      sentry.CheckInStatusInProgress,
+			}, monitorConfig)
+
+			err = importer.Start(s.cfg, s.writeIndex, false, nil)
+			s.wg.Done()
+			if err != nil {
+				slog.Warn("error updating index", "error", err)
+
+				localHub.CaptureException(err)
+				localHub.CaptureCheckIn(&sentry.CheckIn{
+					ID:          *eventID,
 					MonitorSlug: monitorSlug,
-					Status:      sentry.CheckInStatusInProgress,
+					Status:      sentry.CheckInStatusError,
 				}, monitorConfig)
+			} else {
+				slog.Info("update complete")
 
-				err = importer.Start(cfg, write, false, nil)
-				wg.Done()
-				if err != nil {
-					slog.Warn("error updating index", "error", err)
-
-					localHub.CaptureException(err)
-					localHub.CaptureCheckIn(&sentry.CheckIn{
-						ID:          *eventID,
-						MonitorSlug: monitorSlug,
-						Status:      sentry.CheckInStatusError,
-					}, monitorConfig)
-				} else {
-					slog.Info("update complete")
-
-					localHub.CaptureCheckIn(&sentry.CheckIn{
-						ID:          *eventID,
-						MonitorSlug: monitorSlug,
-						Status:      sentry.CheckInStatusOK,
-					}, monitorConfig)
-				}
-				nextRun = nextRun.AddDate(0, 0, 1)
+				localHub.CaptureCheckIn(&sentry.CheckIn{
+					ID:          *eventID,
+					MonitorSlug: monitorSlug,
+					Status:      sentry.CheckInStatusOK,
+				}, monitorConfig)
 			}
-		})
-	}(sentry.CurrentHub().Clone())
-
-	sErr := make(chan error)
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		sErr <- sv.Start()
-	}()
-
-	if cfg.Web.Environment == "development" {
-		log.Printf("server listening on %s", cfg.Web.BaseURL.String())
+			nextRun = nextRun.AddDate(0, 0, 1)
+		}
+	})
+}
+
+func (s *Server) Start(ctx context.Context, liveReload bool) error {
+	var err error
+	s.sv, err = server.New(s.cfg, s.readIndex, liveReload)
+	if err != nil {
+		return errors.Wrap(err, "error setting up server")
 	}
-	err = <-sErr
+	s.wg = &sync.WaitGroup{}
+	go s.startUpdateTimer(ctx, sentry.CurrentHub().Clone())
+
+	s.wg.Add(1)
+	err = s.sv.Start()
 	if err != nil {
-		// Error starting or closing listener:
-		log.Fatalf("error: %v", err)
+		s.wg.Done()
+
+		return errors.Wrap(err, "error starting server")
 	}
-	sentry.Flush(2 * time.Second)
-	wg.Wait()
+
+	return nil
+}
+
+func (s *Server) Stop() {
+	<-s.sv.Stop()
+	defer s.wg.Done()
+	s.sentryHub.Flush(2 * time.Second)
+}
```
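Note that the diff also removes the os.Interrupt handling from this file, so the embedding application now owns signal handling and calls Stop() itself. Continuing the sketch after the commit message (add "os" and "os/signal" to its imports), the wiring might look like this; the ordering matters because Start blocks:

```go
	// Wire up interrupt handling before the blocking call to Start, since
	// this commit moves that responsibility out of the library.
	sigs := make(chan os.Signal, 2)
	signal.Notify(sigs, os.Interrupt)
	go func() {
		sig := <-sigs
		log.Printf("signal captured: %v", sig)
		cancel() // stops the scheduled-update goroutine via ctx.Done()
		s.Stop() // stops the HTTP server and flushes Sentry
	}()

	if err := s.Start(ctx, false); err != nil {
		log.Fatalf("error starting server: %v", err)
	}
```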