2015-09-27 09:50:54 +01:00
// Copyright (C) 2014 The Syncthing Authors.
2014-09-29 21:43:32 +02:00
//
2015-03-07 21:36:35 +01:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
2015-09-27 09:50:54 +01:00
// You can obtain one at http://mozilla.org/MPL/2.0/.
2014-06-01 22:50:14 +02:00
2014-05-15 00:26:55 -03:00
package model
2014-03-28 14:36:57 +01:00
import (
"errors"
2014-08-25 17:45:13 +02:00
"fmt"
2014-11-09 04:26:52 +00:00
"io/ioutil"
2015-01-02 16:15:53 +01:00
"math/rand"
2014-03-28 14:36:57 +01:00
"os"
"path/filepath"
2016-08-05 07:13:52 +00:00
"runtime"
2015-06-26 13:31:30 +02:00
"sort"
2016-01-03 21:15:02 +01:00
"strings"
2014-03-28 14:36:57 +01:00
"time"
2014-06-20 00:27:54 +02:00
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
2015-09-22 19:38:46 +02:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/symlinks"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/versioner"
2014-03-28 14:36:57 +01:00
)
2016-05-04 10:47:33 +00:00
// Register the read-write folder implementation in the factory table so
// the model can construct it for folders of type FolderTypeReadWrite.
func init() {
	folderFactories[config.FolderTypeReadWrite] = newRWFolder
}
2014-09-27 14:44:15 +02:00
// A pullBlockState is passed to the puller routine for each block that needs
// to be fetched.
type pullBlockState struct {
	*sharedPullerState
	block protocol.BlockInfo // the block (offset, size, hash) to fetch from the network
}
2014-09-27 14:44:15 +02:00
// A copyBlocksState is passed to copy routine if the file has blocks to be
// copied.
type copyBlocksState struct {
	*sharedPullerState
	blocks []protocol.BlockInfo // blocks to attempt to copy from locally available data
}
2015-07-03 10:25:35 +01:00
// Which filemode bits to preserve when applying permissions to existing
// files and directories (setgid, setuid and sticky are kept as found on disk).
const retainBits = os.ModeSetgid | os.ModeSetuid | os.ModeSticky
2014-09-27 14:44:15 +02:00
var (
	// activity tracks per-device outstanding requests so pulls can be
	// directed at the least busy device.
	activity = newDeviceActivity()
	// errNoDevice is returned when no connected device has the file needed.
	errNoDevice = errors.New("peers who had this file went away, or the file has changed while syncing. will retry later")
)
2015-06-16 12:12:34 +01:00
// Job types for dbUpdateJob, telling the database updater what kind of
// change to record.
const (
	dbUpdateHandleDir = iota
	dbUpdateDeleteDir
	dbUpdateHandleFile
	dbUpdateDeleteFile
	dbUpdateShortcutFile
)
2015-08-14 09:37:04 +02:00
// Defaults used when the corresponding folder configuration values are
// left unset (zero); see configureCopiersAndPullers.
const (
	defaultCopiers     = 1
	defaultPullers     = 16
	defaultPullerSleep = 10 * time.Second // time between pulls when in sync
	defaultPullerPause = 60 * time.Second // longer wait after repeated failed pull attempts
)
2015-06-16 12:12:34 +01:00
// A dbUpdateJob describes one completed filesystem operation for the
// database updater routine to persist.
type dbUpdateJob struct {
	file    protocol.FileInfo
	jobType int // one of the dbUpdate* constants
}
2015-03-16 21:14:19 +01:00
// rwFolder is the service for a folder in read-write mode: it scans for
// local changes and pulls needed changes from the network.
type rwFolder struct {
	folder

	virtualMtimeRepo *db.VirtualMtimeRepo

	dir            string              // folder root path on disk
	versioner      versioner.Versioner // archives replaced/deleted files when non-nil
	ignorePerms    bool                // folder-wide "ignore permissions" setting
	order          config.PullOrder    // how the pull queue is ordered
	maxConflicts   int
	sleep          time.Duration // interval between pulls when in sync
	pause          time.Duration // interval after repeated failed pull attempts
	allowSparse    bool
	checkFreeSpace bool
	ignoreDelete   bool
	copiers        int // number of concurrent copier routines
	pullers        int // number of concurrent puller routines

	queue       *jobQueue
	dbUpdates   chan dbUpdateJob // created per puller iteration; feeds dbUpdaterRoutine
	pullTimer   *time.Timer
	remoteIndex chan struct{} // An index update was received, we should re-evaluate needs

	errors    map[string]string // path -> error string
	errorsMut sync.Mutex

	initialScanCompleted chan (struct{}) // exposed for testing
}
2016-05-04 10:47:33 +00:00
func newRWFolder ( model * Model , cfg config . FolderConfiguration , ver versioner . Versioner ) service {
2016-04-26 14:01:46 +00:00
f := & rwFolder {
folder : folder {
2016-06-29 06:37:34 +00:00
stateTracker : newStateTracker ( cfg . ID ) ,
scan : newFolderScanner ( cfg ) ,
stop : make ( chan struct { } ) ,
model : model ,
2015-04-22 23:54:31 +01:00
} ,
2015-03-16 21:14:19 +01:00
2016-04-26 14:01:46 +00:00
virtualMtimeRepo : db . NewVirtualMtimeRepo ( model . db , cfg . ID ) ,
dir : cfg . Path ( ) ,
2016-08-05 07:13:52 +00:00
versioner : ver ,
2016-04-26 14:01:46 +00:00
ignorePerms : cfg . IgnorePerms ,
copiers : cfg . Copiers ,
pullers : cfg . Pullers ,
order : cfg . Order ,
maxConflicts : cfg . MaxConflicts ,
allowSparse : ! cfg . DisableSparseFiles ,
checkFreeSpace : cfg . MinDiskFreePct != 0 ,
2016-08-05 07:13:52 +00:00
ignoreDelete : cfg . IgnoreDelete ,
2016-04-26 14:01:46 +00:00
2015-05-07 22:45:07 +02:00
queue : newJobQueue ( ) ,
2015-10-08 00:25:32 +01:00
pullTimer : time . NewTimer ( time . Second ) ,
2015-05-07 22:45:07 +02:00
remoteIndex : make ( chan struct { } , 1 ) , // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.
2015-06-26 13:31:30 +02:00
errorsMut : sync . NewMutex ( ) ,
2016-05-09 12:56:21 +00:00
initialScanCompleted : make ( chan struct { } ) ,
2015-03-16 21:14:19 +01:00
}
2015-08-14 09:37:04 +02:00
2016-04-26 14:01:46 +00:00
f . configureCopiersAndPullers ( cfg )
return f
}
func ( f * rwFolder ) configureCopiersAndPullers ( config config . FolderConfiguration ) {
if f . copiers == 0 {
f . copiers = defaultCopiers
2015-08-14 09:37:04 +02:00
}
2016-04-26 14:01:46 +00:00
if f . pullers == 0 {
f . pullers = defaultPullers
2015-08-14 09:37:04 +02:00
}
2016-04-26 14:01:46 +00:00
if config . PullerPauseS == 0 {
f . pause = defaultPullerPause
2015-10-08 00:25:32 +01:00
} else {
2016-04-26 14:01:46 +00:00
f . pause = time . Duration ( config . PullerPauseS ) * time . Second
2015-10-08 00:25:32 +01:00
}
2016-04-26 14:01:46 +00:00
if config . PullerSleepS == 0 {
f . sleep = defaultPullerSleep
2015-10-08 00:25:32 +01:00
} else {
2016-04-26 14:01:46 +00:00
f . sleep = time . Duration ( config . PullerSleepS ) * time . Second
2015-10-08 00:25:32 +01:00
}
2014-03-28 14:36:57 +01:00
}
2015-05-25 12:43:19 +02:00
// Helper function to check whether either the ignorePerm flag has been
// set on the local host or the FlagNoPermBits has been set on the file/dir
// which is being pulled.
2016-04-26 14:01:46 +00:00
func ( f * rwFolder ) ignorePermissions ( file protocol . FileInfo ) bool {
2016-07-04 10:40:29 +00:00
return f . ignorePerms || file . NoPermissions
2015-05-25 12:43:19 +02:00
}
2014-09-27 14:44:15 +02:00
// Serve will run scans and pulls. It will return when Stop()ed or on a
// critical error.
func (f *rwFolder) Serve() {
	l.Debugln(f, "starting")
	defer l.Debugln(f, "exiting")

	defer func() {
		f.pullTimer.Stop()
		f.scan.timer.Stop()
		// TODO: Should there be an actual FolderStopped state?
		f.setState(FolderIdle)
	}()

	// prevSec is the remote sequence number at the last successful pull;
	// resetting it to zero forces a new pull evaluation.
	var prevSec int64
	var prevIgnoreHash string

	for {
		select {
		case <-f.stop:
			return

		case <-f.remoteIndex:
			// New remote index data: force re-evaluation and pull now.
			prevSec = 0
			f.pullTimer.Reset(0)
			l.Debugln(f, "remote index updated, rescheduling pull")

		case <-f.pullTimer.C:
			// Non-blocking check of the initialScanCompleted channel; it is
			// closed once the first scan has finished.
			select {
			case <-f.initialScanCompleted:
			default:
				// We don't start pulling files until a scan has been completed.
				l.Debugln(f, "skip (initial)")
				f.pullTimer.Reset(f.sleep)
				continue
			}

			f.model.fmut.RLock()
			curIgnores := f.model.folderIgnores[f.folderID]
			f.model.fmut.RUnlock()

			if newHash := curIgnores.Hash(); newHash != prevIgnoreHash {
				// The ignore patterns have changed. We need to re-evaluate if
				// there are files we need now that were ignored before.
				l.Debugln(f, "ignore patterns have changed, resetting prevVer")
				prevSec = 0
				prevIgnoreHash = newHash
			}

			// RemoteSequence() is a fast call, doesn't touch the database.
			curSeq, ok := f.model.RemoteSequence(f.folderID)
			if !ok || curSeq == prevSec {
				// Nothing new since the last pull; sleep and retry later.
				l.Debugln(f, "skip (curSeq == prevSeq)", prevSec, ok)
				f.pullTimer.Reset(f.sleep)
				continue
			}

			if err := f.model.CheckFolderHealth(f.folderID); err != nil {
				l.Infoln("Skipping folder", f.folderID, "pull due to folder error:", err)
				f.pullTimer.Reset(f.sleep)
				continue
			}

			l.Debugln(f, "pulling", prevSec, curSeq)

			f.setState(FolderSyncing)
			f.clearErrors()
			tries := 0

			for {
				tries++

				changed := f.pullerIteration(curIgnores)
				l.Debugln(f, "changed", changed)

				if changed == 0 {
					// No files were changed by the puller, so we are in
					// sync. Remember the local version number and
					// schedule a resync a little bit into the future.

					if lv, ok := f.model.RemoteSequence(f.folderID); ok && lv < curSeq {
						// There's a corner case where the device we needed
						// files from disconnected during the puller
						// iteration. The files will have been removed from
						// the index, so we've concluded that we don't need
						// them, but at the same time we have the local
						// version that includes those files in curVer. So we
						// catch the case that sequence might have
						// decreased here.
						l.Debugln(f, "adjusting curVer", lv)
						curSeq = lv
					}
					prevSec = curSeq
					l.Debugln(f, "next pull in", f.sleep)
					f.pullTimer.Reset(f.sleep)
					break
				}

				if tries > 10 {
					// We've tried a bunch of times to get in sync, but
					// we're not making it. Probably there are write
					// errors preventing us. Flag this with a warning and
					// wait a bit longer before retrying.
					l.Infof("Folder %q isn't making progress. Pausing puller for %v.", f.folderID, f.pause)
					l.Debugln(f, "next pull in", f.pause)

					if folderErrors := f.currentErrors(); len(folderErrors) > 0 {
						events.Default.Log(events.FolderErrors, map[string]interface{}{
							"folder": f.folderID,
							"errors": folderErrors,
						})
					}

					f.pullTimer.Reset(f.pause)
					break
				}
			}
			f.setState(FolderIdle)

		// The reason for running the scanner from within the puller is that
		// this is the easiest way to make sure we are not doing both at the
		// same time.
		case <-f.scan.timer.C:
			err := f.scanSubdirsIfHealthy(nil)
			f.scan.Reschedule()
			if err != nil {
				continue
			}
			// Close initialScanCompleted exactly once, after the first
			// successful scan.
			select {
			case <-f.initialScanCompleted:
			default:
				l.Infoln("Completed initial scan (rw) of folder", f.folderID)
				close(f.initialScanCompleted)
			}

		case req := <-f.scan.now:
			// An explicit scan request; report the result on the supplied
			// error channel.
			req.err <- f.scanSubdirsIfHealthy(req.subdirs)

		case next := <-f.scan.delay:
			f.scan.timer.Reset(next)
		}
	}
}
2016-04-26 14:01:46 +00:00
// IndexUpdated signals that a remote index update was received, so needs
// should be re-evaluated. It never blocks the caller.
func (f *rwFolder) IndexUpdated() {
	select {
	case f.remoteIndex <- struct{}{}:
	default:
		// We might be busy doing a pull and thus not reading from this
		// channel. The channel is 1-buffered, so one notification will be
		// queued to ensure we recheck after the pull, but beyond that we must
		// make sure to not block index receiving.
	}
}
2016-04-26 14:01:46 +00:00
func ( f * rwFolder ) String ( ) string {
return fmt . Sprintf ( "rwFolder/%s@%p" , f . folderID , f )
2014-09-27 14:44:15 +02:00
}
2014-09-07 21:29:06 +02:00
2014-09-28 12:00:38 +01:00
// pullerIteration runs a single puller iteration for the given folder and
2014-09-27 14:44:15 +02:00
// returns the number items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
2014-11-23 00:02:09 +00:00
// flagged as needed in the folder.
2016-04-26 14:01:46 +00:00
func ( f * rwFolder ) pullerIteration ( ignores * ignore . Matcher ) int {
2014-09-27 14:44:15 +02:00
pullChan := make ( chan pullBlockState )
copyChan := make ( chan copyBlocksState )
finisherChan := make ( chan * sharedPullerState )
2015-04-22 23:54:31 +01:00
updateWg := sync . NewWaitGroup ( )
copyWg := sync . NewWaitGroup ( )
pullWg := sync . NewWaitGroup ( )
doneWg := sync . NewWaitGroup ( )
2014-09-27 14:44:15 +02:00
2016-04-26 14:01:46 +00:00
l . Debugln ( f , "c" , f . copiers , "p" , f . pullers )
2014-11-23 18:43:49 +00:00
2016-04-26 14:01:46 +00:00
f . dbUpdates = make ( chan dbUpdateJob )
2015-04-05 15:34:29 +02:00
updateWg . Add ( 1 )
go func ( ) {
2016-04-26 15:11:19 +00:00
// dbUpdaterRoutine finishes when f.dbUpdates is closed
2016-04-26 14:01:46 +00:00
f . dbUpdaterRoutine ( )
2015-04-05 15:34:29 +02:00
updateWg . Done ( )
} ( )
2016-04-26 14:01:46 +00:00
for i := 0 ; i < f . copiers ; i ++ {
2014-10-08 23:41:23 +01:00
copyWg . Add ( 1 )
2014-09-27 14:44:15 +02:00
go func ( ) {
// copierRoutine finishes when copyChan is closed
2016-04-26 14:01:46 +00:00
f . copierRoutine ( copyChan , pullChan , finisherChan )
2014-10-08 23:41:23 +01:00
copyWg . Done ( )
2014-09-27 14:44:15 +02:00
} ( )
}
2016-04-26 14:01:46 +00:00
for i := 0 ; i < f . pullers ; i ++ {
2014-10-08 23:41:23 +01:00
pullWg . Add ( 1 )
2014-09-27 14:44:15 +02:00
go func ( ) {
// pullerRoutine finishes when pullChan is closed
2016-04-26 14:01:46 +00:00
f . pullerRoutine ( pullChan , finisherChan )
2014-10-08 23:41:23 +01:00
pullWg . Done ( )
2014-09-27 14:44:15 +02:00
} ( )
}
2014-12-24 23:12:12 +00:00
doneWg . Add ( 1 )
// finisherRoutine finishes when finisherChan is closed
go func ( ) {
2016-04-26 14:01:46 +00:00
f . finisherRoutine ( finisherChan )
2014-12-24 23:12:12 +00:00
doneWg . Done ( )
} ( )
2014-09-27 14:44:15 +02:00
2016-04-26 14:01:46 +00:00
f . model . fmut . RLock ( )
folderFiles := f . model . folderFiles [ f . folderID ]
f . model . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
// !!!
// WithNeed takes a database snapshot (by necessity). By the time we've
// handled a bunch of files it might have become out of date and we might
// be attempting to sync with an old version of a file...
// !!!
changed := 0
2014-10-12 22:01:57 +01:00
2014-12-19 23:12:12 +00:00
fileDeletions := map [ string ] protocol . FileInfo { }
dirDeletions := [ ] protocol . FileInfo { }
buckets := map [ string ] [ ] protocol . FileInfo { }
2014-10-12 22:01:57 +01:00
2016-04-26 14:01:46 +00:00
handleFile := func ( fi protocol . FileInfo ) bool {
2014-09-27 14:44:15 +02:00
switch {
2016-04-26 14:01:46 +00:00
case fi . IsDeleted ( ) :
2014-11-09 04:26:52 +00:00
// A deleted file, directory or symlink
2016-04-26 14:01:46 +00:00
if fi . IsDirectory ( ) {
dirDeletions = append ( dirDeletions , fi )
2014-12-19 23:12:12 +00:00
} else {
2016-04-26 14:01:46 +00:00
fileDeletions [ fi . Name ] = fi
df , ok := f . model . CurrentFolderFile ( f . folderID , fi . Name )
2014-12-19 23:12:12 +00:00
// Local file can be already deleted, but with a lower version
// number, hence the deletion coming in again as part of
2015-01-30 14:32:59 +00:00
// WithNeed, furthermore, the file can simply be of the wrong
// type if we haven't yet managed to pull it.
if ok && ! df . IsDeleted ( ) && ! df . IsSymlink ( ) && ! df . IsDirectory ( ) {
2014-12-19 23:12:12 +00:00
// Put files into buckets per first hash
key := string ( df . Blocks [ 0 ] . Hash )
buckets [ key ] = append ( buckets [ key ] , df )
}
}
2016-04-26 14:01:46 +00:00
case fi . IsDirectory ( ) && ! fi . IsSymlink ( ) :
2014-09-27 14:44:15 +02:00
// A new or changed directory
2016-04-26 14:01:46 +00:00
l . Debugln ( "Creating directory" , fi . Name )
f . handleDir ( fi )
2014-09-27 14:44:15 +02:00
default :
2016-01-16 17:18:37 +00:00
return false
}
return true
}
folderFiles . WithNeed ( protocol . LocalDeviceID , func ( intf db . FileIntf ) bool {
// Needed items are delivered sorted lexicographically. We'll handle
// directories as they come along, so parents before children. Files
// are queued and the order may be changed later.
2016-08-05 07:13:52 +00:00
if shouldIgnore ( intf , ignores , f . ignoreDelete ) {
return true
}
2016-01-16 17:18:37 +00:00
2016-08-05 07:13:52 +00:00
if err := fileValid ( intf ) ; err != nil {
// The file isn't valid so we can't process it. Pretend that we
// tried and set the error for the file.
f . newError ( intf . FileName ( ) , err )
changed ++
2016-01-16 17:18:37 +00:00
return true
}
2016-08-05 07:13:52 +00:00
file := intf . ( protocol . FileInfo )
2016-04-26 14:01:46 +00:00
l . Debugln ( f , "handling" , file . Name )
2016-01-16 17:18:37 +00:00
if ! handleFile ( file ) {
2016-07-23 12:46:31 +00:00
// A new or changed file or symlink. This is the only case where
// we do stuff concurrently in the background. We only queue
// files where we are connected to at least one device that has
// the file.
devices := folderFiles . Availability ( file . Name )
for _ , dev := range devices {
if f . model . ConnectedTo ( dev ) {
f . queue . Push ( file . Name , file . Size , file . Modified )
changed ++
break
}
}
2014-04-01 23:18:32 +02:00
}
2014-09-27 14:44:15 +02:00
return true
} )
2014-04-01 23:18:32 +02:00
2015-04-25 14:13:53 +09:00
// Reorder the file queue according to configuration
2016-04-26 14:01:46 +00:00
switch f . order {
2015-04-25 14:13:53 +09:00
case config . OrderRandom :
2016-04-26 14:01:46 +00:00
f . queue . Shuffle ( )
2015-04-25 14:13:53 +09:00
case config . OrderAlphabetic :
2016-04-26 14:01:46 +00:00
// The queue is already in alphabetic order.
2015-04-25 14:13:53 +09:00
case config . OrderSmallestFirst :
2016-04-26 14:01:46 +00:00
f . queue . SortSmallestFirst ( )
2015-04-25 14:13:53 +09:00
case config . OrderLargestFirst :
2016-04-26 14:01:46 +00:00
f . queue . SortLargestFirst ( )
2015-04-25 14:13:53 +09:00
case config . OrderOldestFirst :
2016-04-26 14:01:46 +00:00
f . queue . SortOldestFirst ( )
2015-04-25 14:13:53 +09:00
case config . OrderNewestFirst :
2016-04-26 14:01:46 +00:00
f . queue . SortNewestFirst ( )
2015-04-25 14:13:53 +09:00
}
// Process the file queue
2014-12-19 23:12:12 +00:00
nextFile :
2014-12-01 19:23:06 +00:00
for {
2016-01-16 21:42:32 +01:00
select {
2016-04-26 14:01:46 +00:00
case <- f . stop :
2016-01-16 21:42:32 +01:00
// Stop processing files if the puller has been told to stop.
break
default :
}
2016-04-26 14:01:46 +00:00
fileName , ok := f . queue . Pop ( )
2014-12-30 09:31:34 +01:00
if ! ok {
2014-12-01 19:23:06 +00:00
break
}
2014-12-19 23:12:12 +00:00
2016-04-26 14:01:46 +00:00
fi , ok := f . model . CurrentGlobalFile ( f . folderID , fileName )
2014-12-19 23:12:12 +00:00
if ! ok {
2015-01-06 22:12:45 +01:00
// File is no longer in the index. Mark it as done and drop it.
2016-04-26 14:01:46 +00:00
f . queue . Done ( fileName )
2014-12-19 23:12:12 +00:00
continue
}
2016-01-16 17:18:37 +00:00
// Handles races where an index update arrives changing what the file
// is between queueing and retrieving it from the queue, effectively
// changing how the file should be handled.
2016-04-26 14:01:46 +00:00
if handleFile ( fi ) {
2016-01-16 17:18:37 +00:00
continue
}
2016-04-26 14:01:46 +00:00
if ! fi . IsSymlink ( ) {
key := string ( fi . Blocks [ 0 ] . Hash )
2014-12-19 23:12:12 +00:00
for i , candidate := range buckets [ key ] {
2016-04-26 14:01:46 +00:00
if scanner . BlocksEqual ( candidate . Blocks , fi . Blocks ) {
2014-12-19 23:12:12 +00:00
// Remove the candidate from the bucket
2015-01-30 14:32:59 +00:00
lidx := len ( buckets [ key ] ) - 1
buckets [ key ] [ i ] = buckets [ key ] [ lidx ]
buckets [ key ] = buckets [ key ] [ : lidx ]
// candidate is our current state of the file, where as the
// desired state with the delete bit set is in the deletion
// map.
desired := fileDeletions [ candidate . Name ]
2014-12-19 23:12:12 +00:00
// Remove the pending deletion (as we perform it by renaming)
delete ( fileDeletions , candidate . Name )
2016-04-26 14:01:46 +00:00
f . renameFile ( desired , fi )
2014-12-19 23:12:12 +00:00
2016-04-26 14:01:46 +00:00
f . queue . Done ( fileName )
2014-12-19 23:12:12 +00:00
continue nextFile
}
}
2015-01-06 22:12:45 +01:00
}
2014-12-19 23:12:12 +00:00
// Not a rename or a symlink, deal with it.
2016-04-26 14:01:46 +00:00
f . handleFile ( fi , copyChan , finisherChan )
2014-12-01 19:23:06 +00:00
}
2014-09-27 14:44:15 +02:00
// Signal copy and puller routines that we are done with the in data for
2014-10-08 23:41:23 +01:00
// this iteration. Wait for them to finish.
2014-09-27 14:44:15 +02:00
close ( copyChan )
2014-10-08 23:41:23 +01:00
copyWg . Wait ( )
2014-09-27 14:44:15 +02:00
close ( pullChan )
2014-10-08 23:41:23 +01:00
pullWg . Wait ( )
2014-04-01 23:18:32 +02:00
2014-10-08 23:41:23 +01:00
// Signal the finisher chan that there will be no more input.
2014-09-27 14:44:15 +02:00
close ( finisherChan )
2014-04-01 23:18:32 +02:00
2014-09-27 14:44:15 +02:00
// Wait for the finisherChan to finish.
doneWg . Wait ( )
2014-05-19 22:31:28 +02:00
2014-12-19 23:12:12 +00:00
for _ , file := range fileDeletions {
2015-10-03 17:25:21 +02:00
l . Debugln ( "Deleting file" , file . Name )
2016-04-26 14:01:46 +00:00
f . deleteFile ( file )
2014-12-19 23:12:12 +00:00
}
for i := range dirDeletions {
2015-01-30 14:32:59 +00:00
dir := dirDeletions [ len ( dirDeletions ) - i - 1 ]
2015-10-03 17:25:21 +02:00
l . Debugln ( "Deleting dir" , dir . Name )
2016-04-26 14:01:46 +00:00
f . deleteDir ( dir , ignores )
2014-10-12 22:01:57 +01:00
}
2015-04-05 15:34:29 +02:00
// Wait for db updates to complete
2016-04-26 14:01:46 +00:00
close ( f . dbUpdates )
2015-04-05 15:34:29 +02:00
updateWg . Wait ( )
2014-09-27 14:44:15 +02:00
return changed
}
2014-04-01 23:18:32 +02:00
2014-09-27 14:44:15 +02:00
// handleDir creates or updates the given directory, emitting ItemStarted
// and ItemFinished events around the operation and reporting the result
// (or error) to the database updater.
func (f *rwFolder) handleDir(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   file.Name,
		"type":   "dir",
		"action": "update",
	})

	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "dir",
			"action": "update",
		})
	}()

	realName := filepath.Join(f.dir, file.Name)
	mode := os.FileMode(file.Permissions & 0777)
	if f.ignorePermissions(file) {
		mode = 0777
	}

	if shouldDebug() {
		curFile, _ := f.model.CurrentFolderFile(f.folderID, file.Name)
		l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
	}

	info, err := osutil.Lstat(realName)
	switch {
	// There is already something under that name, but it's a file/link.
	// Most likely a file/link is getting replaced with a directory.
	// Remove the file/link and fall through to directory creation.
	case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
		err = osutil.InWritableDir(osutil.Remove, realName)
		if err != nil {
			l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
			f.newError(file.Name, err)
			return
		}
		fallthrough
	// The directory doesn't exist, so we create it with the right
	// mode bits from the start.
	case err != nil && os.IsNotExist(err):
		// We declare a function that acts on only the path name, so
		// we can pass it to InWritableDir. We use a regular Mkdir and
		// not MkdirAll because the parent should already exist.
		mkdir := func(path string) error {
			err = os.Mkdir(path, mode)
			if err != nil || f.ignorePermissions(file) {
				return err
			}

			// Stat the directory so we can check its permissions.
			info, err := osutil.Lstat(path)
			if err != nil {
				return err
			}

			// Mask for the bits we want to preserve and add them in to the
			// directories permissions.
			return os.Chmod(path, mode|(info.Mode()&retainBits))
		}

		if err = osutil.InWritableDir(mkdir, realName); err == nil {
			f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
		} else {
			l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
			f.newError(file.Name, err)
		}
		return
	// Weird error when stat()'ing the dir. Probably won't work to do
	// anything else with it if we can't even stat() it.
	case err != nil:
		l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
		f.newError(file.Name, err)
		return
	}

	// The directory already exists, so we just correct the mode bits. (We
	// don't handle modification times on directories, because that sucks...)
	// It's OK to change mode bits on stuff within non-writable directories.
	if f.ignorePermissions(file) {
		f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
	} else if err := os.Chmod(realName, mode|(info.Mode()&retainBits)); err == nil {
		f.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
	} else {
		l.Infof("Puller (folder %q, dir %q): %v", f.folderID, file.Name, err)
		f.newError(file.Name, err)
	}
}
2014-03-28 14:36:57 +01:00
2014-09-27 14:44:15 +02:00
// deleteDir attempts to delete the given directory, first clearing out any
// temporary or deletable-by-ignore files inside it so the removal can
// succeed. Events and database updates mirror handleDir.
func (f *rwFolder) deleteDir(file protocol.FileInfo, matcher *ignore.Matcher) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   file.Name,
		"type":   "dir",
		"action": "delete",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "dir",
			"action": "delete",
		})
	}()

	realName := filepath.Join(f.dir, file.Name)

	// Delete any temporary files lying around in the directory
	dir, _ := os.Open(realName)
	if dir != nil {
		files, _ := dir.Readdirnames(-1)
		for _, dirFile := range files {
			fullDirFile := filepath.Join(file.Name, dirFile)
			if defTempNamer.IsTemporary(dirFile) || (matcher != nil && matcher.Match(fullDirFile).IsDeletable()) {
				// Best effort; failure surfaces via the Remove below.
				osutil.RemoveAll(filepath.Join(f.dir, fullDirFile))
			}
		}
		dir.Close()
	}

	err = osutil.InWritableDir(osutil.Remove, realName)
	if err == nil || os.IsNotExist(err) {
		// It was removed or it doesn't exist to start with
		f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
	} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
		// We get an error just looking at the directory, and it's not a
		// permission problem. Lets assume the error is in fact some variant
		// of "file does not exist" (possibly expressed as some parent being a
		// file and not a directory etc) and that the delete is handled.
		f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
	} else {
		l.Infof("Puller (folder %q, dir %q): delete: %v", f.folderID, file.Name, err)
		f.newError(file.Name, err)
	}
}
2014-09-27 14:44:15 +02:00
// deleteFile attempts to delete the given file. Depending on state it may
// instead move the file aside as a conflict copy, or archive it via the
// configured versioner. Events and database updates mirror deleteDir.
func (f *rwFolder) deleteFile(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   file.Name,
		"type":   "file",
		"action": "delete",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "delete",
		})
	}()

	realName := filepath.Join(f.dir, file.Name)

	cur, ok := f.model.CurrentFolderFile(f.folderID, file.Name)
	if ok && f.inConflict(cur.Version, file.Version) {
		// There is a conflict here. Move the file to a conflict copy instead
		// of deleting. Also merge with the version vector we had, to indicate
		// we have resolved the conflict.
		file.Version = file.Version.Merge(cur.Version)
		err = osutil.InWritableDir(f.moveForConflict, realName)
	} else if f.versioner != nil {
		// Archive the file instead of deleting outright.
		err = osutil.InWritableDir(f.versioner.Archive, realName)
	} else {
		err = osutil.InWritableDir(osutil.Remove, realName)
	}

	if err == nil || os.IsNotExist(err) {
		// It was removed or it doesn't exist to start with
		f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
	} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
		// We get an error just looking at the file, and it's not a permission
		// problem. Lets assume the error is in fact some variant of "file
		// does not exist" (possibly expressed as some parent being a file and
		// not a directory etc) and that the delete is handled.
		f.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
	} else {
		l.Infof("Puller (folder %q, file %q): delete: %v", f.folderID, file.Name, err)
		f.newError(file.Name, err)
	}
}
2014-05-28 11:45:45 +02:00
// renameFile attempts to rename an existing file to a destination
// and set the right attributes on it.
func (f *rwFolder) renameFile(source, target protocol.FileInfo) {
	var err error
	// A rename is reported as two items: a delete of the source and an
	// update of the target. Both ItemStarted events are logged now, and both
	// ItemFinished events (carrying the final err) fire on return.
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   source.Name,
		"type":   "file",
		"action": "delete",
	})
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   target.Name,
		"type":   "file",
		"action": "update",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   source.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "delete",
		})
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   target.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "update",
		})
	}()

	l.Debugln(f, "taking rename shortcut", source.Name, "->", target.Name)

	from := filepath.Join(f.dir, source.Name)
	to := filepath.Join(f.dir, target.Name)

	if f.versioner != nil {
		// With versioning enabled we copy to the target instead of renaming,
		// then let the versioner archive the source.
		err = osutil.Copy(from, to)
		if err == nil {
			err = osutil.InWritableDir(f.versioner.Archive, from)
		}
	} else {
		err = osutil.TryRename(from, to)
	}

	if err == nil {
		// The file was renamed, so we have handled both the necessary delete
		// of the source and the creation of the target. Fix-up the metadata,
		// and update the local index of the target file.

		f.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}

		err = f.shortcutFile(target)
		if err != nil {
			l.Infof("Puller (folder %q, file %q): rename from %q metadata: %v", f.folderID, target.Name, source.Name, err)
			f.newError(target.Name, err)
			return
		}

		f.dbUpdates <- dbUpdateJob{target, dbUpdateHandleFile}
	} else {
		// We failed the rename so we have a source file that we still need to
		// get rid of. Attempt to delete it instead so that we make *some*
		// progress. The target is unhandled.

		err = osutil.InWritableDir(osutil.Remove, from)
		if err != nil {
			l.Infof("Puller (folder %q, file %q): delete %q after failed rename: %v", f.folderID, target.Name, source.Name, err)
			f.newError(target.Name, err)
			return
		}

		f.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}
	}
}
2015-05-27 11:14:39 +02:00
// This is the flow of data and events here, I think...
//
// +-----------------------+
// | | - - - - > ItemStarted
// | handleFile | - - - - > ItemFinished (on shortcuts)
// | |
// +-----------------------+
// |
// | copyChan (copyBlocksState; unless shortcut taken)
// |
// | +-----------------------+
// | | +-----------------------+
// +--->| | |
// | | copierRoutine |
// +-| |
// +-----------------------+
// |
// | pullChan (sharedPullerState)
// |
// | +-----------------------+
// | | +-----------------------+
// +-->| | |
// | | pullerRoutine |
// +-| |
// +-----------------------+
// |
// | finisherChan (sharedPullerState)
// |
// | +-----------------------+
// | | |
// +-->| finisherRoutine | - - - - > ItemFinished
// | |
// +-----------------------+
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
//
// NOTE(review): finisherChan is accepted but not used in this body; the
// shared state reaches the finisher via the copier/puller routines. Confirm
// whether the parameter is kept only for wiring symmetry.
func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)

	if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		l.Debugln(f, "taking shortcut on", file.Name)

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": f.folderID,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		f.queue.Done(file.Name)

		var err error
		if file.IsSymlink() {
			err = f.shortcutSymlink(file)
		} else {
			err = f.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			f.newError(file.Name, err)
		} else {
			f.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(f.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(f.dir, file.Name)

	if hasCurFile && !curFile.IsDirectory() && !curFile.IsSymlink() {
		// Check that the file on disk is what we expect it to be according to
		// the database. If there's a mismatch here, there might be local
		// changes that we don't know about yet and we should scan before
		// touching the file. If we can't stat the file we'll just pull it.
		if info, err := osutil.Lstat(realName); err == nil {
			mtime := f.virtualMtimeRepo.GetMtime(file.Name, info.ModTime())
			if mtime.Unix() != curFile.Modified || info.Size() != curFile.Size {
				l.Debugln("file modified but not rescanned; not pulling:", realName)
				// Scan() is synchronous (i.e. blocks until the scan is
				// completed and returns an error), but a scan can't happen
				// while we're in the puller routine. Request the scan in the
				// background and it'll be handled when the current pulling
				// sweep is complete. As we do retries, we'll queue the scan
				// for this file up to ten times, but the last nine of those
				// scans will be cheap...
				go f.scan.Scan([]string{file.Name})
				return
			}
		}
	}

	scanner.PopulateOffsets(file.Blocks)

	var blocks []protocol.BlockInfo  // blocks we still need to copy or pull
	var blocksSize int64             // total byte size of those blocks
	var reused []int32               // indexes of blocks already present in the temp file

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, nil)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for i, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
				blocksSize += int64(block.Size)
			} else {
				reused = append(reused, int32(i))
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file depending if we are reusing any blocks or not.
		if len(reused) == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create a
			// file which already exists
			osutil.InWritableDir(osutil.Remove, tempName)
		}
	} else {
		// Copy the blocks, as we don't want to shuffle them on the FileInfo
		blocks = append(blocks, file.Blocks...)
		blocksSize = file.Size
	}

	if f.checkFreeSpace {
		// Refuse the pull up front if the disk can't hold the blocks we
		// still need to fetch.
		if free, err := osutil.DiskFreeBytes(f.dir); err == nil && free < blocksSize {
			l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
			f.newError(file.Name, errors.New("insufficient space"))
			return
		}
	}

	// Shuffle the blocks (Fisher-Yates). Presumably this spreads concurrent
	// requests for the same file across devices/offsets — confirm intent.
	for i := range blocks {
		j := rand.Intn(i + 1)
		blocks[i], blocks[j] = blocks[j], blocks[i]
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	s := sharedPullerState{
		file:             file,
		folder:           f.folderID,
		tempName:         tempName,
		realName:         realName,
		copyTotal:        len(blocks),
		copyNeeded:       len(blocks),
		reused:           len(reused),
		updated:          time.Now(),
		available:        reused,
		availableUpdated: time.Now(),
		ignorePerms:      f.ignorePermissions(file),
		version:          curFile.Version,
		mut:              sync.NewRWMutex(),
		sparse:           f.allowSparse,
		created:          time.Now(),
	}

	l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), reused)

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (f *rwFolder) shortcutFile(file protocol.FileInfo) error {
	realName := filepath.Join(f.dir, file.Name)
	if !f.ignorePermissions(file) {
		// Only the lower nine permission bits are applied.
		if err := os.Chmod(realName, os.FileMode(file.Permissions&0777)); err != nil {
			l.Infof("Puller (folder %q, file %q): shortcut: chmod: %v", f.folderID, file.Name, err)
			f.newError(file.Name, err)
			return err
		}
	}

	t := time.Unix(file.Modified, 0)
	if err := os.Chtimes(realName, t, t); err != nil {
		// Try using virtual mtimes
		info, err := os.Stat(realName)
		if err != nil {
			l.Infof("Puller (folder %q, file %q): shortcut: unable to stat file: %v", f.folderID, file.Name, err)
			f.newError(file.Name, err)
			return err
		}

		f.virtualMtimeRepo.UpdateMtime(file.Name, info.ModTime(), t)
	}

	// This may have been a conflict. We should merge the version vectors so
	// that our clock doesn't move backwards.
	// NOTE(review): file is passed by value, so this merge only changes the
	// local copy; callers record their own (unmerged) copy in the db.
	// Confirm whether the merged version is supposed to propagate.
	if cur, ok := f.model.CurrentFolderFile(f.folderID, file.Name); ok {
		file.Version = file.Version.Merge(cur.Version)
	}

	return nil
}
2015-04-28 18:34:55 +03:00
// shortcutSymlink changes the symlinks type if necessary.
2016-04-26 14:01:46 +00:00
func ( f * rwFolder ) shortcutSymlink ( file protocol . FileInfo ) ( err error ) {
2015-09-04 12:54:01 +02:00
tt := symlinks . TargetFile
if file . IsDirectory ( ) {
tt = symlinks . TargetDirectory
}
2016-04-26 14:01:46 +00:00
err = symlinks . ChangeType ( filepath . Join ( f . dir , file . Name ) , tt )
2015-06-16 12:12:34 +01:00
if err != nil {
2016-04-26 14:01:46 +00:00
l . Infof ( "Puller (folder %q, file %q): symlink shortcut: %v" , f . folderID , file . Name , err )
f . newError ( file . Name , err )
2014-11-09 04:26:52 +00:00
}
2015-02-01 17:31:19 +00:00
return
2014-11-09 04:26:52 +00:00
}
// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, or passes it to the puller routine.
func (f *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
	// Scratch buffer, resliced per block and reused across all files.
	buf := make([]byte, protocol.BlockSize)

	for state := range in {
		dstFd, err := state.tempFile()
		if err != nil {
			// Nothing more to do for this failed file, since we couldn't create a temporary for it.
			out <- state.sharedPullerState
			continue
		}

		if f.model.progressEmitter != nil {
			f.model.progressEmitter.Register(state.sharedPullerState)
		}

		// Snapshot the set of folders and their roots under the model lock,
		// so the block finder can look for copies in any local folder.
		folderRoots := make(map[string]string)
		var folders []string
		f.model.fmut.RLock()
		for folder, cfg := range f.model.folderCfgs {
			folderRoots[folder] = cfg.Path()
			folders = append(folders, folder)
		}
		f.model.fmut.RUnlock()

		for _, block := range state.blocks {
			if f.allowSparse && state.reused == 0 && block.IsEmpty() {
				// The block is a block of all zeroes, and we are not reusing
				// a temp file, so there is no need to do anything with it.
				// If we were reusing a temp file and had this block to copy,
				// it would be because the block in the temp file was *not* a
				// block of all zeroes, so then we should not skip it.
				// Pretend we copied it.
				state.copiedFromOrigin()
				continue
			}

			buf = buf[:int(block.Size)]
			// The callback returns true when the block was successfully
			// copied into the temp file (stopping the iteration), false to
			// try the next candidate location.
			found := f.model.finder.Iterate(folders, block.Hash, func(folder, file string, index int32) bool {
				fd, err := os.Open(filepath.Join(folderRoots[folder], file))
				if err != nil {
					return false
				}

				_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
				fd.Close()
				if err != nil {
					return false
				}

				hash, err := scanner.VerifyBuffer(buf, block)
				if err != nil {
					if hash != nil {
						// The data on disk no longer matches the finder's
						// index entry; repair the entry.
						l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
						err = f.model.finder.Fix(folder, file, index, block.Hash, hash)
						if err != nil {
							l.Warnln("finder fix:", err)
						}
					} else {
						l.Debugln("Finder failed to verify buffer", err)
					}
					return false
				}

				_, err = dstFd.WriteAt(buf, block.Offset)
				if err != nil {
					// Record the failure; the state.failed() check after
					// Iterate aborts the rest of this file.
					state.fail("dst write", err)
				}

				if file == state.file.Name {
					// We copied the block from the old version of this very
					// file.
					state.copiedFromOrigin()
				}

				return true
			})
			if state.failed() != nil {
				break
			}

			if !found {
				// No local copy of the block; hand it to the puller routine
				// to fetch over the network.
				state.pullStarted()
				ps := pullBlockState{
					sharedPullerState: state.sharedPullerState,
					block:             block,
				}
				pullChan <- ps
			} else {
				state.copyDone(block)
			}
		}

		out <- state.sharedPullerState
	}
}
2016-04-26 14:01:46 +00:00
func ( f * rwFolder ) pullerRoutine ( in <- chan pullBlockState , out chan <- * sharedPullerState ) {
2014-09-27 14:44:15 +02:00
for state := range in {
if state . failed ( ) != nil {
2015-05-27 11:14:39 +02:00
out <- state . sharedPullerState
2014-12-28 23:11:32 +00:00
continue
2014-09-27 14:44:15 +02:00
}
2014-07-24 09:38:16 +02:00
2015-04-28 18:34:55 +03:00
// Get an fd to the temporary file. Technically we don't need it until
2014-09-27 14:44:15 +02:00
// after fetching the block, but if we run into an error here there is
// no point in issuing the request to the network.
fd , err := state . tempFile ( )
if err != nil {
2015-05-27 11:14:39 +02:00
out <- state . sharedPullerState
2014-12-28 23:11:32 +00:00
continue
2014-07-24 09:38:16 +02:00
}
2014-08-05 09:46:11 +02:00
2016-04-26 14:01:46 +00:00
if f . allowSparse && state . reused == 0 && state . block . IsEmpty ( ) {
2015-11-21 16:30:53 +01:00
// There is no need to request a block of all zeroes. Pretend we
// requested it and handled it correctly.
2016-04-15 10:59:41 +00:00
state . pullDone ( state . block )
2015-11-21 16:30:53 +01:00
out <- state . sharedPullerState
continue
}
2014-12-28 23:11:32 +00:00
var lastError error
2016-04-26 14:01:46 +00:00
candidates := f . model . Availability ( f . folderID , state . file . Name , state . file . Version , state . block )
2014-12-28 23:11:32 +00:00
for {
// Select the least busy device to pull the block from. If we found no
// feasible device at all, fail the block (and in the long run, the
// file).
2016-04-15 10:59:41 +00:00
selected , found := activity . leastBusy ( candidates )
if ! found {
2014-12-28 23:11:32 +00:00
if lastError != nil {
2015-01-07 23:12:12 +00:00
state . fail ( "pull" , lastError )
2014-12-28 23:11:32 +00:00
} else {
2015-01-07 23:12:12 +00:00
state . fail ( "pull" , errNoDevice )
2014-12-28 23:11:32 +00:00
}
break
}
2014-08-05 09:46:11 +02:00
2016-04-15 10:59:41 +00:00
candidates = removeAvailability ( candidates , selected )
2014-07-24 09:38:16 +02:00
2014-12-28 23:11:32 +00:00
// Fetch the block, while marking the selected device as in use so that
// leastBusy can select another device when someone else asks.
activity . using ( selected )
2016-04-26 14:01:46 +00:00
buf , lastError := f . model . requestGlobal ( selected . ID , f . folderID , state . file . Name , state . block . Offset , int ( state . block . Size ) , state . block . Hash , selected . FromTemporary )
2014-12-28 23:11:32 +00:00
activity . done ( selected )
if lastError != nil {
2016-04-26 14:01:46 +00:00
l . Debugln ( "request:" , f . folderID , state . file . Name , state . block . Offset , state . block . Size , "returned error:" , lastError )
2014-12-28 23:11:32 +00:00
continue
}
// Verify that the received block matches the desired hash, if not
// try pulling it from another device.
_ , lastError = scanner . VerifyBuffer ( buf , state . block )
if lastError != nil {
2016-04-26 14:01:46 +00:00
l . Debugln ( "request:" , f . folderID , state . file . Name , state . block . Offset , state . block . Size , "hash mismatch" )
2014-12-28 23:11:32 +00:00
continue
}
// Save the block data we got from the cluster
_ , err = fd . WriteAt ( buf , state . block . Offset )
if err != nil {
2015-01-07 23:12:12 +00:00
state . fail ( "save" , err )
2014-12-28 23:11:32 +00:00
} else {
2016-04-15 10:59:41 +00:00
state . pullDone ( state . block )
2014-12-28 23:11:32 +00:00
}
break
}
2015-01-07 23:12:12 +00:00
out <- state . sharedPullerState
2014-07-24 09:38:16 +02:00
}
2014-03-28 14:36:57 +01:00
}
2014-04-27 12:14:53 +02:00
// performFinish moves the fully assembled temp file into place: it fixes up
// permissions and timestamps, handles any existing file/directory/symlink at
// the destination (conflict copy, versioning, or removal), performs the
// rename, converts symlink payloads, and records the result in the index.
func (f *rwFolder) performFinish(state *sharedPullerState) error {
	// Set the correct permission bits on the new file
	if !f.ignorePermissions(state.file) {
		if err := os.Chmod(state.tempName, os.FileMode(state.file.Permissions&0777)); err != nil {
			return err
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	if err := os.Chtimes(state.tempName, t, t); err != nil {
		// Try using virtual mtimes instead
		info, err := os.Stat(state.tempName)
		if err != nil {
			return err
		}
		f.virtualMtimeRepo.UpdateMtime(state.file.Name, info.ModTime(), t)
	}

	if stat, err := osutil.Lstat(state.realName); err == nil {
		// There is an old file or directory already in place. We need to
		// handle that.
		switch {
		case stat.IsDir() || stat.Mode()&os.ModeSymlink != 0:
			// It's a directory or a symlink. These are not versioned or
			// archived for conflicts, only removed (which of course fails for
			// non-empty directories).

			// TODO: This is the place where we want to remove temporary files
			// and future hard ignores before attempting a directory delete.
			// Should share code with f.deleteDir().

			if err = osutil.InWritableDir(osutil.Remove, state.realName); err != nil {
				return err
			}

		case f.inConflict(state.version, state.file.Version):
			// The new file has been changed in conflict with the existing one. We
			// should file it away as a conflict instead of just removing or
			// archiving. Also merge with the version vector we had, to indicate
			// we have resolved the conflict.

			state.file.Version = state.file.Version.Merge(state.version)
			if err = osutil.InWritableDir(f.moveForConflict, state.realName); err != nil {
				return err
			}

		case f.versioner != nil:
			// If we should use versioning, let the versioner archive the old
			// file before we replace it. Archiving a non-existent file is not
			// an error.

			if err = f.versioner.Archive(state.realName); err != nil {
				return err
			}
		}
	}

	// Replace the original content with the new one. If it didn't work,
	// leave the temp file in place for reuse.
	if err := osutil.TryRename(state.tempName, state.realName); err != nil {
		return err
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			return err
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			tt := symlinks.TargetFile
			if state.file.IsDirectory() {
				tt = symlinks.TargetDirectory
			}
			return symlinks.Create(path, string(content), tt)
		}, state.realName)
		if err != nil {
			return err
		}
	}

	// Record the updated file in the index
	f.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
	return nil
}
2014-11-09 04:26:52 +00:00
2016-04-26 15:11:19 +00:00
func ( f * rwFolder ) finisherRoutine ( in <- chan * sharedPullerState ) {
2014-11-16 23:18:59 +00:00
for state := range in {
2014-11-29 23:18:56 +01:00
if closed , err := state . finalClose ( ) ; closed {
2016-04-26 15:11:19 +00:00
l . Debugln ( f , "closing" , state . file . Name )
2014-11-29 23:18:56 +01:00
2016-04-26 15:11:19 +00:00
f . queue . Done ( state . file . Name )
2015-05-27 11:14:39 +02:00
if err == nil {
2016-04-26 15:11:19 +00:00
err = f . performFinish ( state )
2015-01-07 23:12:12 +00:00
}
2015-05-27 11:14:39 +02:00
if err != nil {
l . Infoln ( "Puller: final:" , err )
2016-04-26 15:11:19 +00:00
f . newError ( state . file . Name , err )
2015-05-27 11:14:39 +02:00
}
events . Default . Log ( events . ItemFinished , map [ string ] interface { } {
2016-04-26 15:11:19 +00:00
"folder" : f . folderID ,
2015-05-27 11:14:39 +02:00
"item" : state . file . Name ,
"error" : events . Error ( err ) ,
"type" : "file" ,
"action" : "update" ,
} )
2016-04-26 15:11:19 +00:00
if f . model . progressEmitter != nil {
f . model . progressEmitter . Deregister ( state )
2014-11-29 23:18:56 +01:00
}
2014-04-27 12:14:53 +02:00
}
}
2014-09-27 14:44:15 +02:00
}
2014-04-27 12:14:53 +02:00
2014-12-01 19:23:06 +00:00
// BringToFront moves the given filename to the front of the job queue, so
// that it is handled next.
func (f *rwFolder) BringToFront(filename string) {
	f.queue.BringToFront(filename)
}
// Jobs returns the two job name lists from the folder's pull queue
// (presumably in-progress and queued, in that order — see the queue
// implementation for the exact split).
func (f *rwFolder) Jobs() ([]string, []string) {
	return f.queue.Jobs()
}
// dbUpdaterRoutine aggregates db updates and commits them in batches no
// larger than 1000 items, and no more delayed than 2 seconds.
func (f *rwFolder) dbUpdaterRoutine() {
	const (
		maxBatchSize = 1000
		maxBatchTime = 2 * time.Second
	)

	batch := make([]dbUpdateJob, 0, maxBatchSize)
	files := make([]protocol.FileInfo, 0, maxBatchSize)
	tick := time.NewTicker(maxBatchTime)
	defer tick.Stop()

	// handleBatch commits the accumulated jobs and remembers the last
	// regular file that was fully handled or deleted, which is reported via
	// receivedFile.
	handleBatch := func() {
		found := false
		var lastFile protocol.FileInfo
		for _, job := range batch {
			files = append(files, job.file)
			// Invalid files and plain directories don't count as "received".
			if job.file.IsInvalid() || (job.file.IsDirectory() && !job.file.IsSymlink()) {
				continue
			}
			if job.jobType&(dbUpdateHandleFile|dbUpdateDeleteFile) == 0 {
				continue
			}
			found = true
			lastFile = job.file
		}

		// All updates to file/folder objects that originated remotely
		// (across the network) use this call to updateLocals
		f.model.updateLocalsFromPulling(f.folderID, files)

		if found {
			f.model.receivedFile(f.folderID, lastFile)
		}

		// Reset both slices for the next batch, keeping the backing arrays.
		batch = batch[:0]
		files = files[:0]
	}

loop:
	for {
		select {
		case job, ok := <-f.dbUpdates:
			if !ok {
				break loop
			}

			// Zero the sequence — presumably so the db assigns a fresh local
			// sequence number on commit; confirm against updateLocals.
			job.file.Sequence = 0
			batch = append(batch, job)

			if len(batch) == maxBatchSize {
				handleBatch()
			}
		case <-tick.C:
			if len(batch) > 0 {
				handleBatch()
			}
		}
	}

	// Flush whatever remains when the updates channel closes.
	if len(batch) > 0 {
		handleBatch()
	}
}
2016-04-26 15:11:19 +00:00
func ( f * rwFolder ) inConflict ( current , replacement protocol . Vector ) bool {
2015-04-09 12:53:41 +02:00
if current . Concurrent ( replacement ) {
// Obvious case
return true
}
2016-04-26 15:11:19 +00:00
if replacement . Counter ( f . model . shortID ) > current . Counter ( f . model . shortID ) {
2015-04-09 12:53:41 +02:00
// The replacement file contains a higher version for ourselves than
// what we have. This isn't supposed to be possible, since it's only
// we who can increment that counter. We take it as a sign that
// something is wrong (our index may have been corrupted or removed)
// and flag it as a conflict.
return true
}
return false
}
2016-04-15 10:59:41 +00:00
func removeAvailability ( availabilities [ ] Availability , availability Availability ) [ ] Availability {
for i := range availabilities {
if availabilities [ i ] == availability {
availabilities [ i ] = availabilities [ len ( availabilities ) - 1 ]
return availabilities [ : len ( availabilities ) - 1 ]
2014-12-28 23:11:32 +00:00
}
}
2016-04-15 10:59:41 +00:00
return availabilities
2014-12-28 23:11:32 +00:00
}
2015-03-29 16:16:36 +02:00
2016-04-26 15:11:19 +00:00
// moveForConflict renames name to a timestamped conflict copy
// ("name.sync-conflict-YYYYMMDD-HHMMSS.ext"), then prunes old conflict
// copies so at most f.maxConflicts remain. With maxConflicts == 0 the file
// is simply removed; a file that already is a conflict copy is removed
// rather than copied again.
func (f *rwFolder) moveForConflict(name string) error {
	if strings.Contains(filepath.Base(name), ".sync-conflict-") {
		// Never make a conflict copy of a conflict copy; just drop it.
		l.Infoln("Conflict for", name, "which is already a conflict copy; not copying again.")
		if err := osutil.Remove(name); err != nil && !os.IsNotExist(err) {
			return err
		}
		return nil
	}

	if f.maxConflicts == 0 {
		// Conflict copies are disabled for this folder; remove the file.
		if err := osutil.Remove(name); err != nil && !os.IsNotExist(err) {
			return err
		}
		return nil
	}

	// Insert the timestamp tag before the extension so conflict copies of
	// the same file sort together and keep their type association.
	ext := filepath.Ext(name)
	withoutExt := name[:len(name)-len(ext)]
	newName := withoutExt + time.Now().Format(".sync-conflict-20060102-150405") + ext
	err := os.Rename(name, newName)
	if os.IsNotExist(err) {
		// We were supposed to move a file away but it does not exist. Either
		// the user has already moved it away, or the conflict was between a
		// remote modification and a local delete. In either way it does not
		// matter, go ahead as if the move succeeded.
		err = nil
	}
	if f.maxConflicts > -1 {
		// Prune surplus conflict copies. The glob matches the timestamp tag
		// pattern; reverse-sorting the (timestamped) names puts the newest
		// first, so everything past index maxConflicts is the oldest.
		matches, gerr := osutil.Glob(withoutExt + ".sync-conflict-????????-??????" + ext)
		if gerr == nil && len(matches) > f.maxConflicts {
			sort.Sort(sort.Reverse(sort.StringSlice(matches)))
			for _, match := range matches[f.maxConflicts:] {
				gerr = osutil.Remove(match)
				if gerr != nil {
					// Best effort; pruning failures are only logged.
					l.Debugln(f, "removing extra conflict", gerr)
				}
			}
		} else if gerr != nil {
			l.Debugln(f, "globbing for conflicts", gerr)
		}
	}
	// err reflects the rename, not the pruning.
	return err
}
2015-06-26 13:31:30 +02:00
2016-04-26 15:11:19 +00:00
func ( f * rwFolder ) newError ( path string , err error ) {
f . errorsMut . Lock ( )
defer f . errorsMut . Unlock ( )
2015-06-26 13:31:30 +02:00
// We might get more than one error report for a file (i.e. error on
// Write() followed by Close()); we keep the first error as that is
// probably closer to the root cause.
2016-04-26 15:11:19 +00:00
if _ , ok := f . errors [ path ] ; ok {
2015-06-26 13:31:30 +02:00
return
}
2016-04-26 15:11:19 +00:00
f . errors [ path ] = err . Error ( )
2015-06-26 13:31:30 +02:00
}
2016-04-26 15:11:19 +00:00
func ( f * rwFolder ) clearErrors ( ) {
f . errorsMut . Lock ( )
f . errors = make ( map [ string ] string )
f . errorsMut . Unlock ( )
2015-06-26 13:31:30 +02:00
}
2016-04-26 15:11:19 +00:00
func ( f * rwFolder ) currentErrors ( ) [ ] fileError {
f . errorsMut . Lock ( )
errors := make ( [ ] fileError , 0 , len ( f . errors ) )
for path , err := range f . errors {
2015-06-26 13:31:30 +02:00
errors = append ( errors , fileError { path , err } )
}
sort . Sort ( fileErrorList ( errors ) )
2016-04-26 15:11:19 +00:00
f . errorsMut . Unlock ( )
2015-06-26 13:31:30 +02:00
return errors
}
// A []fileError is sent as part of an event and will be JSON serialized.
type fileError struct {
Path string ` json:"path" `
Err string ` json:"error" `
}
type fileErrorList [ ] fileError
func ( l fileErrorList ) Len ( ) int {
return len ( l )
}
func ( l fileErrorList ) Less ( a , b int ) bool {
return l [ a ] . Path < l [ b ] . Path
}
func ( l fileErrorList ) Swap ( a , b int ) {
l [ a ] , l [ b ] = l [ b ] , l [ a ]
}
2016-08-05 07:13:52 +00:00
// fileValid returns nil when the file is valid for processing, or an error if it's not
func fileValid(file db.FileIntf) error {
	if file.IsDeleted() {
		// We don't care about file validity if we're not supposed to have it
		return nil
	}
	if file.IsSymlink() && !symlinks.Supported {
		return errUnsupportedSymlink
	}
	if runtime.GOOS == "windows" && windowsInvalidFilename(file.FileName()) {
		return errInvalidFilename
	}
	return nil
}
// windowsDisallowedCharacters are characters that may not appear in file
// names on Windows: reserved punctuation plus the control characters 0-31.
// (Path separators are not listed here; presumably handled elsewhere — TODO
// confirm against callers.)
var windowsDisallowedCharacters = string([]rune{
	'<', '>', ':', '"', '|', '?', '*',
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
	21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
	31,
})

// windowsInvalidFilename reports whether name (a backslash-separated path)
// is not a valid file name on Windows.
func windowsInvalidFilename(name string) bool {
	// None of the path components may end in a space.
	for _, component := range strings.Split(name, `\`) {
		if n := len(component); n > 0 && component[n-1] == ' ' {
			return true
		}
	}

	// Nor may the name contain any of the disallowed characters.
	return strings.ContainsAny(name, windowsDisallowedCharacters)
}