Compare commits

..

7 Commits

| SHA1 | Message | Date |
| --- | --- | --- |
| 2a7740d8d7 | Refactor reading args | 2024-07-03 13:31:12 +02:00 |
| 7580ca5399 | Enable build for linux | 2024-07-03 13:31:03 +02:00 |
| 58cce74ce8 | Remove escapes from code "blocks" | 2024-07-01 21:06:01 +02:00 |
| e022a838ba | Solve the race condition when recursively reading files, hopefully | 2024-07-01 21:00:38 +02:00 |
| 0a627ae9ca | Add readme | 2024-07-01 20:38:38 +02:00 |
| d72644aec3 | Code format (I think? Don't know what changed) | 2024-07-01 20:30:19 +02:00 |
| eeb8dac3a0 | Add insane ramblings | 2024-07-01 20:28:49 +02:00 |
6 changed files with 551 additions and 503 deletions

.gitignore
@@ -1,2 +1,3 @@
 main.exe
+main_linux

README.md (new file)
@@ -0,0 +1,35 @@
# synclib
A small Go tool for creating symbolic links.
Created out of the infuriating difficulty of creating symbolic links on Windows.
## Custom syntax
The tool works with "instructions" that describe symbolic links.
Each instruction, whatever the input form, is `<source>,<destination>,<force?>`.
For example:
`sync this,that`
Instructions can be supplied through:
- Stdin
  - `echo "this,that" | sync`
- Run arguments
  - `sync this,that foo,bar "foo 2","C:/bar"`
- Files
  - `sync -f <file>`
  - The file contains instructions, one per line
- Directories
  - `sync -r <directory>`
  - This mode looks for "sync" files recursively in the directory and runs their instructions
## Use case
I have a lot of folders (documents, projects, configurations) backed up via Seafile. To have the software that uses those folders find them at their usual locations, I create soft symbolic links from the Seafile drive to their original locations.
It would be painful to redo all (or some) of these symlinks when reinstalling the OS, or when something somewhere explodes (say, software gets uninstalled), so I keep the instructions in sync files in the individual folders on the Seafile drive.
That way I can easily back up my configuration and run `sync -r ~/Seafile` to symlink everything back where it belongs.
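
The `<source>,<destination>,<force?>` syntax described in the README above maps onto a very small parser. Below is a minimal sketch of what parsing one instruction could look like; the repository's actual `ParseInstruction` and `LinkInstruction` (visible in the diffs further down) may differ, so the field names and the defaulting of the force flag here are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// LinkInstruction mirrors the three comma-separated fields of the syntax.
// Field names are assumptions for this sketch, not the repository's definition.
type LinkInstruction struct {
	Source      string
	Destination string
	Force       bool
}

// parseInstruction splits "<source>,<destination>,<force?>" into its parts.
// The third field is optional and defaults to false.
func parseInstruction(raw string) (LinkInstruction, error) {
	parts := strings.Split(strings.TrimSpace(raw), ",")
	if len(parts) < 2 {
		return LinkInstruction{}, fmt.Errorf("expected at least <source>,<destination>, got %q", raw)
	}
	instr := LinkInstruction{
		Source:      strings.TrimSpace(parts[0]),
		Destination: strings.TrimSpace(parts[1]),
	}
	if len(parts) > 2 && strings.TrimSpace(parts[2]) != "" {
		force, err := strconv.ParseBool(strings.TrimSpace(parts[2]))
		if err != nil {
			return LinkInstruction{}, fmt.Errorf("invalid force flag %q: %w", parts[2], err)
		}
		instr.Force = force
	}
	return instr, nil
}

func main() {
	for _, raw := range []string{"this,that", "this,that,true"} {
		instr, err := parseInstruction(raw)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%+v\n", instr)
	}
}
```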

(unnamed build script)
@@ -1 +1,2 @@
-go build main && cp main.exe "/c/Program Files/Git/usr/bin/cln.exe"
+GOOS=windows GOARCH=amd64 go build -o main.exe main && cp main.exe "/c/Program Files/Git/usr/bin/cln.exe"
+GOOS=linux GOARCH=amd64 go build -o main_linux main

(unnamed Go file)
@@ -197,7 +197,7 @@ func ReadFromArgs(output chan LinkInstruction, status chan error) {
 	defer close(status)
 	log.Printf("Reading input from args")
-	for _, arg := range os.Args[1:] {
+	for _, arg := range flag.Args() {
 		instruction, err := ParseInstruction(arg)
 		if err != nil {
 			log.Printf("Error parsing arg: %s'%s'%s, error: %s%+v%s", SourceColor, arg, DefaultColor, ErrorColor, err, DefaultColor)

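The switch from `os.Args[1:]` to `flag.Args()` in the hunk above matters once named flags like the README's `-f` and `-r` exist: `os.Args[1:]` still contains the flags and their values, while `flag.Args()` returns only the positional arguments left over after `flag.Parse()`. A small illustrative sketch of the difference (the flag definitions are assumptions based on the README, not the repository's actual code):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Hypothetical flag definitions mirroring the README's -f <file> and -r <directory> modes.
	file := flag.String("f", "", "read instructions from a file")
	dir := flag.String("r", "", "look for sync files recursively in a directory")
	flag.Parse()

	// Invoked as: sync -f list.txt this,that foo,bar
	fmt.Println(os.Args[1:]) // [-f list.txt this,that foo,bar] -- flags included
	fmt.Println(flag.Args()) // [this,that foo,bar]             -- positional arguments only

	_, _ = file, dir // unused in this sketch
}
```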
util.go

@@ -75,6 +75,7 @@ func GetSyncFilesRecursively(input string, output chan string, status chan error
 	var wg sync.WaitGroup
 	var initial sync.Once
 	var done bool
+	wg.Add(1)
 	directories := make(chan string, 100000)
 	workerPool := make(chan struct{}, 4000)
@@ -82,12 +83,12 @@ func GetSyncFilesRecursively(input string, output chan string, status chan error
 	go func() {
 		for {
-			fmt.Printf("\rFiles processed: %d; Folders processed: %d; Workers: %d; Directory Stack Size: %d;", filesProcessed, foldersProcessed, len(workerPool), len(directories))
+			fmt.Printf("\rFiles processed: %d; Folders processed: %d; Workers: %d; Directory Stack Size: %d;", atomic.LoadInt32((&filesProcessed)), atomic.LoadInt32(&foldersProcessed), len(workerPool), len(directories))
 			<-progressTicker.C
 		}
 	}()
-	log.Printf("%+v", len(workerPool))
+	// log.Printf("%+v", len(workerPool))
 	go func() {
 		for directory := range directories {
 			workerPool <- struct{}{}
@@ -117,16 +118,26 @@ func GetSyncFilesRecursively(input string, output chan string, status chan error
 					}
 				}
 				// log.Printf("Done reading directory %s", directory)
+				done = len(directories) == 0
+				if done {
+					initial.Do(func() {
+						// Parallelism is very difficult...
+						time.Sleep(250 * time.Millisecond)
+						wg.Done()
+					})
+				}
 			}(directory)
 		}
 	}()
 	// This actually does not go through ALL files sadly...
 	// It so happens (very often) that we manage to quit between one iteration ending
 	// And another beginning
 	// In such a state workgroup is decreased and, before it has a chance to increase, we are done
 	// What I should do here is only terminate if directories is empty
 	// ...but how do I do that?
 	// I might be wrong... Fuck knows...
 	// It also sometimes happens that wg.Wait triggers after wg.Done on line 97 but before the next (what would be!) wg.Add on line 94
 	// This happens much more often with a small number of workers
 	// Such is the nature of race conditions...
 	wg.Wait()
 	log.Printf("Files processed: %d; Folders processed: %d", filesProcessed, foldersProcessed)
}
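
The comment block above describes the underlying problem well: with an `Add` per directory and a `Done` per finished directory, the counter can touch zero between one worker finishing and the next directory being registered, so `wg.Wait()` returns early; the `sync.Once` plus `time.Sleep` fix narrows that window rather than closing it. The usual way to close it completely is to call `wg.Add(1)` for a directory *before* it is enqueued and `wg.Done()` only after that directory has been read and all of its subdirectories have themselves been counted, so the counter can never reach zero while work is still outstanding. Below is a hedged sketch of that pattern, not the repository's code; names, channel sizes, and the worker bound are illustrative, and the shared counters are read through `sync/atomic` as in the hunk above.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
)

// findSyncFiles walks root with a bounded number of concurrent directory reads and
// sends the path of every file named "sync" to output, closing output when done.
// This is a sketch of the Add-before-enqueue WaitGroup pattern, not the
// repository's implementation.
func findSyncFiles(root string, output chan<- string) {
	var wg sync.WaitGroup
	var filesProcessed, foldersProcessed int32

	directories := make(chan string, 100000)
	workerPool := make(chan struct{}, 16)

	wg.Add(1) // account for the root before any worker can possibly call Done
	directories <- root

	go func() {
		for directory := range directories {
			workerPool <- struct{}{} // acquire a worker slot
			go func(dir string) {
				// Done runs only after this directory has been read and every
				// subdirectory has already been counted via wg.Add below.
				defer func() { <-workerPool; wg.Done() }()
				entries, err := os.ReadDir(dir)
				if err != nil {
					return
				}
				atomic.AddInt32(&foldersProcessed, 1)
				for _, entry := range entries {
					full := filepath.Join(dir, entry.Name())
					if entry.IsDir() {
						wg.Add(1) // add before enqueueing, never after
						directories <- full
						continue
					}
					atomic.AddInt32(&filesProcessed, 1)
					if entry.Name() == "sync" {
						output <- full
					}
				}
			}(directory)
		}
	}()

	wg.Wait() // cannot fire early: every pending directory holds a count
	close(directories)
	close(output)
	fmt.Printf("Files processed: %d; Folders processed: %d\n",
		atomic.LoadInt32(&filesProcessed), atomic.LoadInt32(&foldersProcessed))
}

func main() {
	output := make(chan string, 1024)
	done := make(chan struct{})
	go func() {
		for path := range output {
			fmt.Println(path)
		}
		close(done)
	}()
	findSyncFiles(".", output)
	<-done
}
```

With this accounting there is no need for the `done` flag, the `sync.Once`, or the sleep: `wg.Wait()` returns exactly when the last enqueued directory has been fully processed.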