Hopefully fix race conditions and add debug flag

main.go (19 lines changed)

@@ -3,6 +3,7 @@ package main
 import (
 	"bufio"
 	"flag"
+	"io"
 	"log"
 	"os"
 	"regexp"
@@ -32,14 +33,24 @@ var FileRegex, _ = regexp.Compile(`^sync$`)
 var programName = os.Args[0]
 
 func main() {
-	// Format:
-	// source,target,force?
-	log.SetFlags(log.Lmicroseconds)
-
 	recurse := flag.String("r", "", "recurse into directories")
 	file := flag.String("f", "", "file to read instructions from")
+	debug := flag.Bool("d", false, "debug")
 	flag.Parse()
 
+	if *debug {
+		log.SetFlags(log.Lmicroseconds | log.Lshortfile)
+		logFile, err := os.Create("main.log")
+		if err != nil {
+			log.Printf("Error creating log file: %v", err)
+			os.Exit(1)
+		}
+		logger := io.MultiWriter(os.Stdout, logFile)
+		log.SetOutput(logger)
+	} else {
+		log.SetFlags(log.Lmicroseconds)
+	}
+
 	log.Printf("Recurse: %s", *recurse)
 	log.Printf("File: %s", *file)
 
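Note: the -d branch above boils down to teeing the standard logger to both stdout and a log file. A minimal standalone sketch of that pattern follows; the defer Close and log.Fatalf are illustrative additions, not part of this commit.

package main

import (
	"flag"
	"io"
	"log"
	"os"
)

func main() {
	debug := flag.Bool("d", false, "debug")
	flag.Parse()

	if *debug {
		// Include file:line in each record while debugging.
		log.SetFlags(log.Lmicroseconds | log.Lshortfile)
		logFile, err := os.Create("main.log")
		if err != nil {
			log.Fatalf("Error creating log file: %v", err)
		}
		defer logFile.Close()
		// Tee every log record to the terminal and to main.log.
		log.SetOutput(io.MultiWriter(os.Stdout, logFile))
	} else {
		log.SetFlags(log.Lmicroseconds)
	}

	log.Printf("debug=%v", *debug)
}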

util.go (20 lines changed)

@@ -72,20 +72,21 @@ func GetSyncFilesRecursively(input string, output chan string, status chan error
 	var foldersProcessed int32
 	progressTicker := time.NewTicker(200 * time.Millisecond)
 	defer progressTicker.Stop()
+
+	var wg sync.WaitGroup
+	var initial sync.Once
+	wg.Add(1)
+	directories := make(chan string, 100000)
+	workerPool := make(chan struct{}, 10000)
+	directories <- input
+
 	go func() {
 		for {
-			fmt.Printf("\rFiles processed: %d; Folders processed: %d;", filesProcessed, foldersProcessed)
+			fmt.Printf("\rFiles processed: %d; Folders processed: %d; Workers: %d; Directory Stack Size: %d;", filesProcessed, foldersProcessed, len(workerPool), len(directories))
 			<-progressTicker.C
 		}
 	}()
 
-	var wg sync.WaitGroup
-	wg.Add(1)
-	var initial sync.Once
-	directories := make(chan string, 10000)
-	workerPool := make(chan struct{}, 10000)
-	directories <- input
-
 	log.Printf("%+v", len(workerPool))
 	go func() {
 		for directory := range directories {
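Note: the race-condition part of this commit is the reordering above: directories and workerPool now exist before the progress goroutine starts reading len(directories) and len(workerPool). A minimal sketch of that shape follows; the atomic loads on the counters are an assumption added for illustration (the diff reads filesProcessed and foldersProcessed directly), and the work loop is invented.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var filesProcessed, foldersProcessed int32
	// Declare shared state before any goroutine that reads it.
	directories := make(chan string, 100000)
	workerPool := make(chan struct{}, 10000)

	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	go func() {
		for range ticker.C {
			fmt.Printf("\rFiles: %d; Folders: %d; Workers: %d; Directory Stack Size: %d;",
				atomic.LoadInt32(&filesProcessed),
				atomic.LoadInt32(&foldersProcessed),
				len(workerPool), len(directories))
		}
	}()

	// Simulated work so the reporter has something to show.
	for i := 0; i < 5; i++ {
		workerPool <- struct{}{}
		directories <- "dir"
		atomic.AddInt32(&foldersProcessed, 1)
		atomic.AddInt32(&filesProcessed, 10)
		time.Sleep(300 * time.Millisecond)
	}
	fmt.Println()
}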
@@ -96,7 +97,6 @@ func GetSyncFilesRecursively(input string, output chan string, status chan error
 			defer wg.Done()
 			defer func() { <-workerPool }()
 
-			// log.Printf("Reading directory %s", directory)
 			files, err := os.ReadDir(directory)
 			if err != nil {
 				log.Printf("Error reading directory %s: %+v", directory, err)
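Note: defer func() { <-workerPool }() is the release half of a buffered-channel semaphore; the acquire half is outside this hunk. A self-contained sketch of the pattern, with sizes and timings chosen only for illustration:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	workerPool := make(chan struct{}, 3) // at most 3 workers run at once
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		workerPool <- struct{}{} // acquire a slot (blocks while 3 are busy)
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-workerPool }() // release the slot
			fmt.Println("worker", id, "reading a directory")
			time.Sleep(100 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}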
@@ -119,6 +119,8 @@ func GetSyncFilesRecursively(input string, output chan string, status chan error
 			// log.Printf("Done reading directory %s", directory)
 
 			initial.Do(func() {
+				// Parallelism is very difficult...
+				time.Sleep(250 * time.Millisecond)
 				wg.Done()
 			})
 		}(directory)
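Note: the initial sync.Once together with the new time.Sleep implements an "initial hold" on the WaitGroup: the up-front wg.Add(1) keeps the count non-zero until the first directory is actually being processed, and the sleep is a grace period so later wg.Add calls land before the count can reach zero. A minimal sketch of that pattern with hypothetical names (work, item):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	var initial sync.Once

	work := make(chan string, 100)
	wg.Add(1) // hold the group open until the first item is picked up
	work <- "start"

	go func() {
		for item := range work {
			wg.Add(1)
			go func(item string) {
				defer wg.Done()
				fmt.Println("processing", item)
				// Release the initial hold exactly once, after a short
				// grace period, mirroring the sleep in the diff above.
				initial.Do(func() {
					time.Sleep(250 * time.Millisecond)
					wg.Done()
				})
			}(item)
		}
	}()

	wg.Wait()
	fmt.Println("all queued work drained")
}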