package main

import (
	"flag"
	"fmt"
	"os"
	"sort"
	"sync"
	"time"

	"github.com/go-git/go-git/v5"

	"modify/logger"
	"modify/processor"
	"modify/utils"
)

type GlobalStats struct {
	TotalMatches            int
	TotalModifications      int
	ProcessedFiles          int
	FailedFiles             int
	ModificationsPerCommand sync.Map
}

var (
	repo     *git.Repository
	worktree *git.Worktree
	stats    = GlobalStats{
		ModificationsPerCommand: sync.Map{},
	}
)

func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [options] <...files_or_globs>\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "\nOptions:\n")
		fmt.Fprintf(os.Stderr, " -git\n")
		fmt.Fprintf(os.Stderr, " Use git to manage files\n")
		fmt.Fprintf(os.Stderr, " -reset\n")
		fmt.Fprintf(os.Stderr, " Reset files to their original state\n")
		fmt.Fprintf(os.Stderr, " -loglevel string\n")
		fmt.Fprintf(os.Stderr, " Set logging level: ERROR, WARNING, INFO, DEBUG, TRACE (default \"INFO\")\n")
		fmt.Fprintf(os.Stderr, "\nExamples:\n")
		fmt.Fprintf(os.Stderr, " Regex mode (default):\n")
		fmt.Fprintf(os.Stderr, " %s \"(\\d+)\" \"*1.5\" data.xml\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "\nNote: v1, v2, etc. are used to refer to capture groups as numbers.\n")
		fmt.Fprintf(os.Stderr, " s1, s2, etc. are used to refer to capture groups as strings.\n")
		fmt.Fprintf(os.Stderr, " Helper functions: num(str) converts string to number, str(num) converts number to string\n")
		fmt.Fprintf(os.Stderr, " is_number(str) checks if a string is numeric\n")
		fmt.Fprintf(os.Stderr, " If expression starts with an operator like *, /, +, -, =, etc., v1 is automatically prepended\n")
		fmt.Fprintf(os.Stderr, " You can use any valid Lua code, including if statements, loops, etc.\n")
		fmt.Fprintf(os.Stderr, " Glob patterns are supported for file selection (*.xml, data/**.xml, etc.)\n")
	}

	// TODO: Fix bed shitting when doing *.yml in barotrauma directory
	flag.Parse()
	args := flag.Args()

	level := logger.ParseLevel(*utils.LogLevel)
	logger.Init(level)
	logger.Info("Initializing with log level: %s", level.String())

	// The plan is:
	// Load all commands
	commands, err := utils.LoadCommands(args)
	if err != nil {
		logger.Error("Failed to load commands: %v", err)
		flag.Usage()
		return
	}

	if *utils.Filter != "" {
		logger.Info("Filtering commands by name: %s", *utils.Filter)
		commands = utils.FilterCommands(commands, *utils.Filter)
		logger.Info("Filtered %d commands", len(commands))
	}

	// Then aggregate all the globs and deduplicate them
	globs := utils.AggregateGlobs(commands)
	logger.Debug("Aggregated %d globs before deduplication", utils.CountGlobsBeforeDedup(commands))

	for _, command := range commands {
		logger.Trace("Command: %s", command.Name)
		logger.Trace("Regex: %s", command.Regex)
		logger.Trace("Files: %v", command.Files)
		logger.Trace("Lua: %s", command.Lua)
		logger.Trace("Git: %t", command.Git)
		logger.Trace("Reset: %t", command.Reset)
		logger.Trace("Isolate: %t", command.Isolate)
		logger.Trace("LogLevel: %s", command.LogLevel)
	}

	// Resolve all the files for all the globs
	logger.Info("Found %d unique file patterns", len(globs))
	files, err := utils.ExpandGLobs(globs)
	if err != nil {
		logger.Error("Failed to expand file patterns: %v", err)
		return
	}
	logger.Info("Found %d files to process", len(files))

	// Somehow connect files to commands via globs..
	// For each file check every glob of every command
	// Maybe memoize this part
	// That way we know what commands affect what files
	associations, err := utils.AssociateFilesWithCommands(files, commands)
	if err != nil {
		logger.Error("Failed to associate files with commands: %v", err)
		return
	}

	// Then for each file run all commands associated with the file.
	// The buffered channel acts as a semaphore that bounds how many files are processed concurrently.
	workers := make(chan struct{}, *utils.ParallelFiles)
	wg := sync.WaitGroup{}

	// Add performance tracking
	startTime := time.Now()

	var fileMutex sync.Mutex

	// This aggregation is great but what if one modification replaces the whole entire file?
	// Shit......
	// TODO: Add "Isolate" field to modifications which makes them run alone
	for file, association := range associations {
		workers <- struct{}{}
		wg.Add(1)
		// Capture the loop variables for the goroutine (required before Go 1.22, harmless after).
		file, association := file, association
		logger.SafeGoWithArgs(func(args ...interface{}) {
			defer func() { <-workers }()
			defer wg.Done()

			// Track per-file processing time
			fileStartTime := time.Now()

			err := RunIsolateCommands(association, file, &fileMutex)
			if err != nil {
				logger.Error("Failed to run isolate commands for file %q: %v", file, err)
				return
			}

			err = RunOtherCommands(file, association, &fileMutex)
			if err != nil {
				logger.Error("Failed to run other commands for file %q: %v", file, err)
				return
			}

			logger.Debug("File %q processed in %v", file, time.Since(fileStartTime))
		}, file, commands)
	}
	wg.Wait()

	processingTime := time.Since(startTime)
	logger.Info("Processing completed in %v", processingTime)
	if stats.ProcessedFiles > 0 {
		logger.Info("Average time per file: %v", processingTime/time.Duration(stats.ProcessedFiles))
	}

	// TODO: Also give each command its own logger, maybe prefix it with something... Maybe give commands a name?
	// Do that with logger.WithField("loglevel", level.String())
	// Since each command also has its own log level
	// TODO: Maybe even figure out how to run individual commands...?
	// TODO: What to do with git? Figure it out ....
	// if *gitFlag {
	// 	logger.Info("Git integration enabled, setting up git repository")
	// 	err := setupGit()
	// 	if err != nil {
	// 		logger.Error("Failed to setup git: %v", err)
	// 		fmt.Fprintf(os.Stderr, "Error setting up git: %v\n", err)
	// 		return
	// 	}
	// }

	// logger.Debug("Expanding file patterns")
	// files, err := expandFilePatterns(filePatterns)
	// if err != nil {
	// 	logger.Error("Failed to expand file patterns: %v", err)
	// 	fmt.Fprintf(os.Stderr, "Error expanding file patterns: %v\n", err)
	// 	return
	// }

	// if *gitFlag {
	// 	logger.Info("Cleaning up git files before processing")
	// 	err := cleanupGitFiles(files)
	// 	if err != nil {
	// 		logger.Error("Failed to cleanup git files: %v", err)
	// 		fmt.Fprintf(os.Stderr, "Error cleaning up git files: %v\n", err)
	// 		return
	// 	}
	// }

	// if *resetFlag {
	// 	logger.Info("Files reset to their original state, nothing more to do")
	// 	log.Printf("Files reset to their original state, nothing more to do")
	// 	return
	// }

	// Print summary
	if stats.TotalModifications == 0 {
		logger.Warning("No modifications were made in any files")
	} else {
		logger.Info("Operation complete! Modified %d values in %d/%d files", stats.TotalModifications, stats.ProcessedFiles, stats.ProcessedFiles+stats.FailedFiles)
		sortedCommands := []string{}
		stats.ModificationsPerCommand.Range(func(key, value interface{}) bool {
			sortedCommands = append(sortedCommands, key.(string))
			return true
		})
		sort.Strings(sortedCommands)
		for _, command := range sortedCommands {
			count, _ := stats.ModificationsPerCommand.Load(command)
			if count.(int) > 0 {
				logger.Info("\tCommand %q made %d modifications", command, count)
			} else {
				logger.Warning("\tCommand %q made no modifications", command)
			}
		}
	}
}

func RunOtherCommands(file string, association utils.FileCommandAssociation, fileMutex *sync.Mutex) error {
	fileData, err := os.ReadFile(file)
	if err != nil {
		return fmt.Errorf("failed to read file %q: %w", file, err)
	}
	logger.Trace("Loaded %d bytes of data for file %q", len(fileData), file)
	fileDataStr := string(fileData)

	// Aggregate all the modifications and execute them
	modifications := []utils.ReplaceCommand{}
	for _, command := range association.Commands {
		logger.Info("Processing file %q with command %q", file, command.Regex)
		newModifications, err := processor.ProcessRegex(fileDataStr, command, file)
		if err != nil {
			return fmt.Errorf("failed to process file %q with command %q: %w", file, command.Regex, err)
		}
		modifications = append(modifications, newModifications...)

		// It is not guaranteed that all the commands will be executed...
		// TODO: Make this better
		// We'd have to pass the map to ExecuteModifications or something...
		count, ok := stats.ModificationsPerCommand.Load(command.Name)
		if !ok {
			count = 0
		}
		stats.ModificationsPerCommand.Store(command.Name, count.(int)+len(newModifications))
	}

	if len(modifications) == 0 {
		logger.Info("No modifications found for file %q", file)
		return nil
	}

	// Sort commands in reverse order for safe replacements (see the sketch at the end of this file)
	fileDataStr, count := utils.ExecuteModifications(modifications, fileDataStr)

	fileMutex.Lock()
	stats.ProcessedFiles++
	stats.TotalModifications += count
	fileMutex.Unlock()

	logger.Info("Executed %d modifications for file %q", count, file)

	err = os.WriteFile(file, []byte(fileDataStr), 0644)
	if err != nil {
		return fmt.Errorf("failed to write file %q: %w", file, err)
	}
	return nil
}

func RunIsolateCommands(association utils.FileCommandAssociation, file string, fileMutex *sync.Mutex) error {
	for _, isolateCommand := range association.IsolateCommands {
		fileData, err := os.ReadFile(file)
		if err != nil {
			return fmt.Errorf("failed to read file %q: %w", file, err)
		}
		logger.Trace("Loaded %d bytes of data for file %q", len(fileData), file)
		fileDataStr := string(fileData)

		logger.Info("Processing file %q with isolate command %q", file, isolateCommand.Regex)
		modifications, err := processor.ProcessRegex(fileDataStr, isolateCommand, file)
		if err != nil {
			return fmt.Errorf("failed to process file %q with isolate command %q: %w", file, isolateCommand.Regex, err)
		}
		if len(modifications) == 0 {
			logger.Warning("No modifications found for file %q", file)
			// Move on to the next isolate command instead of skipping the rest of them.
			continue
		}

		fileDataStr, count := utils.ExecuteModifications(modifications, fileDataStr)

		fileMutex.Lock()
		stats.ProcessedFiles++
		stats.TotalModifications += count
		fileMutex.Unlock()

		logger.Info("Executed %d isolate modifications for file %q", count, file)

		err = os.WriteFile(file, []byte(fileDataStr), 0644)
		if err != nil {
			return fmt.Errorf("failed to write file %q: %w", file, err)
		}
	}
	return nil
}
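
// NOTE: The sketch below is an illustration only, not part of the tool. The real
// replacement logic lives in utils.ExecuteModifications and works on
// utils.ReplaceCommand values whose definition is not shown in this file; the
// replaceSketch type and its Start/End/Replacement fields are assumed names used
// purely to demonstrate the technique referred to by the "reverse order for safe
// replacements" comment in RunOtherCommands: apply byte-range replacements from
// the highest offset down, so the offsets of the replacements still pending are
// never shifted by an earlier edit.
type replaceSketch struct {
	Start, End  int    // assumed half-open byte range [Start, End) to replace
	Replacement string // text substituted for that range
}

func executeModificationsSketch(data string, mods []replaceSketch) (string, int) {
	// Apply the highest offsets first so earlier ranges are not shifted by edits.
	sort.Slice(mods, func(i, j int) bool { return mods[i].Start > mods[j].Start })
	count := 0
	for _, m := range mods {
		if m.Start < 0 || m.End > len(data) || m.Start > m.End {
			continue // skip malformed ranges instead of panicking
		}
		data = data[:m.Start] + m.Replacement + data[m.End:]
		count++
	}
	return data, count
}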