2014-11-16 21:13:20 +01:00
// Copyright (C) 2014 The Syncthing Authors.
2014-09-29 21:43:32 +02:00
//
2015-03-07 21:36:35 +01:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
2017-02-09 07:52:18 +01:00
// You can obtain one at https://mozilla.org/MPL/2.0/.
2014-06-01 22:50:14 +02:00
2014-05-15 00:26:55 -03:00
package model
2013-12-15 11:43:31 +01:00
import (
2018-05-05 09:24:44 +01:00
"bytes"
2015-03-10 23:45:43 +01:00
"encoding/json"
2014-01-06 21:31:36 +01:00
"errors"
2013-12-23 12:12:44 -05:00
"fmt"
2014-01-05 23:54:57 +01:00
"net"
2014-03-28 14:36:57 +01:00
"path/filepath"
2015-06-03 09:47:39 +02:00
"reflect"
2019-01-05 18:10:02 +01:00
"regexp"
2015-04-29 20:46:32 +02:00
"runtime"
2014-08-11 20:20:01 +02:00
"strings"
2018-10-05 10:26:25 +02:00
stdsync "sync"
2013-12-15 11:43:31 +01:00
"time"
2014-06-21 09:43:12 +02:00
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/config"
2016-05-04 19:38:12 +00:00
"github.com/syncthing/syncthing/lib/connections"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
2016-08-05 17:45:45 +00:00
"github.com/syncthing/syncthing/lib/fs"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
2015-09-22 19:38:46 +02:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/sync"
2016-12-17 19:48:33 +00:00
"github.com/syncthing/syncthing/lib/upgrade"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/versioner"
2018-09-08 11:56:56 +02:00
"github.com/thejerf/suture"
2013-12-15 11:43:31 +01:00
)
2018-01-01 14:39:23 +00:00
// locationLocal caches the system's local time zone, resolved once at
// process start. Failing to resolve "Local" is unrecoverable, hence the
// panic in init.
var locationLocal *time.Location

func init() {
	loc, err := time.LoadLocation("Local")
	if err != nil {
		panic(err.Error())
	}
	locationLocal = loc
}
2014-07-15 13:04:37 +02:00
// How many files to send in each Index/IndexUpdate message.
const (
	maxBatchSizeBytes = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	maxBatchSizeFiles = 1000       // Either way, don't include more files than this
)
2014-07-15 13:04:37 +02:00
2014-09-30 17:52:05 +02:00
// service is the interface implemented by the per-folder runners (the
// folder type implementations created via folderFactory). It mixes
// suture-style lifecycle methods (Serve/Stop) with folder operations.
type service interface {
	BringToFront(string) // prioritize the named file in the queue
	Override()
	Revert()
	DelayScan(d time.Duration)
	SchedulePull()              // something relevant changed, we should try a pull
	Jobs() ([]string, []string) // In progress, Queued
	Scan(subs []string) error
	Serve()
	Stop()
	CheckHealth() error
	Errors() []FileError
	WatchError() error
	ForceRescan(file protocol.FileInfo) error
	GetStatistics() stats.FolderStatistics

	// Unexported state accessors, used internally by the model.
	getState() (folderState, time.Time, error)
	setState(state folderState)
	setError(err error)
}
2016-04-15 10:59:41 +00:00
// Availability identifies a device that has a file (or block) available,
// either from its regular index or from a temporary file it is currently
// downloading (FromTemporary).
type Availability struct {
	ID            protocol.DeviceID `json:"id"`
	FromTemporary bool              `json:"fromTemporary"`
}
2019-02-26 09:09:25 +01:00
// Model is the public interface of the model, implemented by *model. It
// covers folder lifecycle management, scanning, browsing of file and
// completion state, and various statistics used by the API layer.
type Model interface {
	suture.Service
	connections.Model

	AddFolder(cfg config.FolderConfiguration)
	RestartFolder(from, to config.FolderConfiguration)
	StartFolder(folder string)
	ResetFolder(folder string)
	DelayScan(folder string, next time.Duration)
	ScanFolder(folder string) error
	ScanFolders() map[string]error
	ScanFolderSubdirs(folder string, subs []string) error
	State(folder string) (string, time.Time, error)
	FolderErrors(folder string) ([]FileError, error)
	WatchError(folder string) error
	Override(folder string)
	Revert(folder string)
	BringToFront(folder, file string)
	GetIgnores(folder string) ([]string, []string, error)
	SetIgnores(folder string, content []string) error

	GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error)
	RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error)

	LocalChangedFiles(folder string, page, perpage int) []db.FileInfoTruncated
	NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated)
	RemoteNeedFolderFiles(device protocol.DeviceID, folder string, page, perpage int) ([]db.FileInfoTruncated, error)
	CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
	CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool)
	Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability

	GlobalSize(folder string) db.Counts
	LocalSize(folder string) db.Counts
	NeedSize(folder string) db.Counts
	ReceiveOnlyChangedSize(folder string) db.Counts
	CurrentSequence(folder string) (int64, bool)
	RemoteSequence(folder string) (int64, bool)

	Completion(device protocol.DeviceID, folder string) FolderCompletion
	ConnectionStats() map[string]interface{}
	DeviceStatistics() map[string]stats.DeviceStatistics
	FolderStatistics() map[string]stats.FolderStatistics
	UsageReportingStats(version int, preview bool) map[string]interface{}

	StartDeadlockDetector(timeout time.Duration)
	GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{}
}
// model is the concrete Model implementation. Folder state is guarded by
// fmut and connection state by pmut; the established lock order elsewhere
// in this file is fmut before pmut.
type model struct {
	*suture.Supervisor // runs the folder services and the progress emitter

	// Immutable after construction.
	cfg               config.Wrapper
	db                *db.Lowlevel
	finder            *db.BlockFinder
	progressEmitter   *ProgressEmitter
	id                protocol.DeviceID
	shortID           protocol.ShortID
	cacheIgnoredFiles bool
	protectedFiles    []string

	clientName    string
	clientVersion string

	fmut               sync.RWMutex                                          // protects the below
	folderCfgs         map[string]config.FolderConfiguration                 // folder -> cfg
	folderFiles        map[string]*db.FileSet                                // folder -> files
	deviceStatRefs     map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores      map[string]*ignore.Matcher                            // folder -> matcher object
	folderRunners      map[string]service                                    // folder -> puller or scanner
	folderRunnerTokens map[string][]suture.ServiceToken                      // folder -> tokens for puller or scanner

	folderRestartMuts syncMutexMap // folder -> restart mutex

	pmut                sync.RWMutex // protects the below
	conn                map[protocol.DeviceID]connections.Connection
	connRequestLimiters map[protocol.DeviceID]*byteSemaphore
	closed              map[protocol.DeviceID]chan struct{}
	helloMessages       map[protocol.DeviceID]protocol.HelloResult
	deviceDownloads     map[protocol.DeviceID]*deviceDownloadState
	remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders

	foldersRunning int32 // for testing only
}
2019-03-11 07:28:54 +01:00
// folderFactory constructs a runner (service) for one folder type from the
// model, the folder's file set, ignore matcher, configuration, versioner
// and filesystem.
type folderFactory func(*model, *db.FileSet, *ignore.Matcher, config.FolderConfiguration, versioner.Versioner, fs.Filesystem) service

var (
	// folderFactories maps folder type to its runner factory. Entries are
	// registered elsewhere (per folder type implementation) — not visible
	// in this file.
	folderFactories = make(map[config.FolderType]folderFactory)
)
2014-01-06 21:31:36 +01:00
2016-06-26 10:07:27 +00:00
var (
	errDeviceUnknown = errors.New("unknown device")
	errDevicePaused  = errors.New("device is paused")
	errDeviceIgnored = errors.New("device is ignored")
	// ErrFolderPaused is exported as it is checked by callers outside this
	// package.
	ErrFolderPaused      = errors.New("folder is paused")
	errFolderNotRunning  = errors.New("folder is not running")
	errFolderMissing     = errors.New("no such folder")
	errNetworkNotAllowed = errors.New("network not allowed")

	// errors about why a connection is closed
	errIgnoredFolderRemoved = errors.New("folder no longer ignored")
	errReplacingConnection  = errors.New("replacing connection")
)
2014-01-06 11:11:18 +01:00
// NewModel creates and starts a new model. The model starts in read-only
// mode, where it sends index information to connected peers and responds
// to requests for file data without altering the local folder in any way.
//
// The progress emitter is added to the model's supervisor here, and the
// model subscribes to configuration changes before being returned.
func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, ldb *db.Lowlevel, protectedFiles []string) Model {
	m := &model{
		Supervisor: suture.New("model", suture.Spec{
			Log: func(line string) {
				l.Debugln(line)
			},
			PassThroughPanics: true,
		}),
		cfg:                 cfg,
		db:                  ldb,
		finder:              db.NewBlockFinder(ldb),
		progressEmitter:     NewProgressEmitter(cfg),
		id:                  id,
		shortID:             id.Short(),
		cacheIgnoredFiles:   cfg.Options().CacheIgnoredFiles,
		protectedFiles:      protectedFiles,
		clientName:          clientName,
		clientVersion:       clientVersion,
		folderCfgs:          make(map[string]config.FolderConfiguration),
		folderFiles:         make(map[string]*db.FileSet),
		deviceStatRefs:      make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
		folderIgnores:       make(map[string]*ignore.Matcher),
		folderRunners:       make(map[string]service),
		folderRunnerTokens:  make(map[string][]suture.ServiceToken),
		conn:                make(map[protocol.DeviceID]connections.Connection),
		connRequestLimiters: make(map[protocol.DeviceID]*byteSemaphore),
		closed:              make(map[protocol.DeviceID]chan struct{}),
		helloMessages:       make(map[protocol.DeviceID]protocol.HelloResult),
		deviceDownloads:     make(map[protocol.DeviceID]*deviceDownloadState),
		remotePausedFolders: make(map[protocol.DeviceID][]string),
		fmut:                sync.NewRWMutex(),
		pmut:                sync.NewRWMutex(),
	}
	// Run the progress emitter as a service under our supervisor.
	m.Add(m.progressEmitter)
	scanLimiter.setCapacity(cfg.Options().MaxConcurrentScans)
	// Get notified about configuration changes.
	cfg.Subscribe(m)

	return m
}
2015-04-28 22:32:10 +02:00
// StartDeadlockDetector starts a deadlock detector on the models locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
2019-02-26 09:09:25 +01:00
func ( m * model ) StartDeadlockDetector ( timeout time . Duration ) {
2015-04-08 13:35:03 +01:00
l . Infof ( "Starting deadlock detector with %v timeout" , timeout )
2016-10-30 00:14:38 +01:00
detector := newDeadlockDetector ( timeout )
detector . Watch ( "fmut" , m . fmut )
detector . Watch ( "pmut" , m . pmut )
2015-04-08 13:35:03 +01:00
}
2016-06-26 10:07:27 +00:00
// StartFolder constructs the folder service and starts it. The folder
// must already have been added (AddFolder); startFolderLocked panics
// otherwise. Both mutexes are taken in the usual fmut-then-pmut order,
// and the "ready" log line is deliberately emitted after they are
// released.
func (m *model) StartFolder(folder string) {
	m.fmut.Lock()
	m.pmut.Lock()
	folderCfg := m.folderCfgs[folder]
	m.startFolderLocked(folderCfg)
	m.pmut.Unlock()
	m.fmut.Unlock()

	l.Infof("Ready to synchronize %s (%s)", folderCfg.Description(), folderCfg.Type)
}
2019-04-21 14:21:36 +02:00
func ( m * model ) startFolderLocked ( cfg config . FolderConfiguration ) {
if err := m . checkFolderRunningLocked ( cfg . ID ) ; err == errFolderMissing {
panic ( "cannot start nonexistent folder " + cfg . Description ( ) )
2017-12-15 20:01:56 +00:00
} else if err == nil {
2019-04-21 14:21:36 +02:00
panic ( "cannot start already running folder " + cfg . Description ( ) )
2014-09-27 14:44:15 +02:00
}
2016-05-04 10:47:33 +00:00
folderFactory , ok := folderFactories [ cfg . Type ]
if ! ok {
2016-05-04 11:26:36 +00:00
panic ( fmt . Sprintf ( "unknown folder type 0x%x" , cfg . Type ) )
2016-05-04 10:47:33 +00:00
}
2019-04-21 14:21:36 +02:00
folder := cfg . ID
2019-03-11 07:28:54 +01:00
fset := m . folderFiles [ folder ]
2016-08-07 16:21:59 +00:00
// Find any devices for which we hold the index in the db, but the folder
// is not shared, and drop it.
expected := mapDevices ( cfg . DeviceIDs ( ) )
2019-03-11 07:28:54 +01:00
for _ , available := range fset . ListDevices ( ) {
2016-08-07 16:21:59 +00:00
if _ , ok := expected [ available ] ; ! ok {
l . Debugln ( "dropping" , folder , "state for" , available )
2019-03-11 07:28:54 +01:00
fset . Drop ( available )
2016-08-07 16:21:59 +00:00
}
}
2016-12-21 18:41:25 +00:00
// Close connections to affected devices
for _ , id := range cfg . DeviceIDs ( ) {
2019-01-09 17:31:09 +01:00
m . closeLocked ( id , fmt . Errorf ( "started folder %v" , cfg . Description ( ) ) )
2016-12-21 18:41:25 +00:00
}
2019-03-11 07:28:54 +01:00
v , ok := fset . Sequence ( protocol . LocalDeviceID ) , true
2016-06-26 10:07:27 +00:00
indexHasFiles := ok && v > 0
if ! indexHasFiles {
// It's a blank folder, so this may the first time we're looking at
// it. Attempt to create and tag with our marker as appropriate. We
// don't really do anything with errors at this point except warn -
// if these things don't work, we still want to start the folder and
// it'll show up as errored later.
2018-06-10 15:41:20 +02:00
if err := cfg . CreateRoot ( ) ; err != nil {
l . Warnln ( "Failed to create folder root directory" , err )
} else if err = cfg . CreateMarker ( ) ; err != nil {
l . Warnln ( "Failed to create folder marker:" , err )
2016-06-26 10:07:27 +00:00
}
}
2018-01-01 14:39:23 +00:00
ver := cfg . Versioner ( )
if service , ok := ver . ( suture . Service ) ; ok {
// The versioner implements the suture.Service interface, so
// expects to be run in the background in addition to being called
// when files are going to be archived.
token := m . Add ( service )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
2014-03-29 18:53:48 +01:00
}
2014-09-27 14:44:15 +02:00
2019-03-11 07:28:54 +01:00
ffs := fset . MtimeFS ( )
2017-09-20 06:49:04 +00:00
// These are our metadata files, and they should always be hidden.
2019-02-02 12:16:27 +01:00
ffs . Hide ( config . DefaultMarkerName )
ffs . Hide ( ".stversions" )
ffs . Hide ( ".stignore" )
2017-09-20 06:49:04 +00:00
2019-03-11 07:28:54 +01:00
p := folderFactory ( m , fset , m . folderIgnores [ folder ] , cfg , ver , ffs )
2017-10-20 14:52:55 +00:00
2016-05-04 10:47:33 +00:00
m . folderRunners [ folder ] = p
2015-10-18 20:13:58 -04:00
m . warnAboutOverwritingProtectedFiles ( folder )
2015-11-13 13:30:52 +01:00
token := m . Add ( p )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
2019-02-26 09:09:25 +01:00
func ( m * model ) warnAboutOverwritingProtectedFiles ( folder string ) {
2016-12-16 22:23:35 +00:00
if m . folderCfgs [ folder ] . Type == config . FolderTypeSendOnly {
2015-10-18 20:13:58 -04:00
return
}
2017-08-19 14:36:56 +00:00
// This is a bit of a hack.
ffs := m . folderCfgs [ folder ] . Filesystem ( )
if ffs . Type ( ) != fs . FilesystemTypeBasic {
return
}
folderLocation := ffs . URI ( )
2015-10-18 20:13:58 -04:00
ignores := m . folderIgnores [ folder ]
var filesAtRisk [ ] string
for _ , protectedFilePath := range m . protectedFiles {
// check if file is synced in this folder
2018-11-22 11:16:45 +01:00
if protectedFilePath != folderLocation && ! fs . IsParent ( protectedFilePath , folderLocation ) {
2015-10-18 20:13:58 -04:00
continue
}
// check if file is ignored
2017-03-04 07:49:48 +00:00
relPath , _ := filepath . Rel ( folderLocation , protectedFilePath )
if ignores . Match ( relPath ) . IsIgnored ( ) {
2015-10-18 20:13:58 -04:00
continue
}
filesAtRisk = append ( filesAtRisk , protectedFilePath )
}
if len ( filesAtRisk ) > 0 {
2016-10-27 17:02:19 +00:00
l . Warnln ( "Some protected files may be overwritten and cause issues. See https://docs.syncthing.net/users/config.html#syncing-configuration-files for more information. The at risk files are:" , strings . Join ( filesAtRisk , ", " ) )
2015-10-18 20:13:58 -04:00
}
}
2019-02-26 09:09:25 +01:00
func ( m * model ) AddFolder ( cfg config . FolderConfiguration ) {
2016-08-07 16:21:59 +00:00
if len ( cfg . ID ) == 0 {
panic ( "cannot add empty folder id" )
}
2017-08-19 14:36:56 +00:00
if len ( cfg . Path ) == 0 {
panic ( "cannot add empty folder path" )
}
2016-08-07 16:21:59 +00:00
m . fmut . Lock ( )
m . addFolderLocked ( cfg )
m . fmut . Unlock ( )
}
2019-02-26 09:09:25 +01:00
// addFolderLocked sets up the in-memory state for a folder: its
// configuration, file set and ignore matcher. Must be called with fmut
// held. A missing .stignore file is normal; any other ignore load error
// is only warned about, so the folder is added regardless.
func (m *model) addFolderLocked(cfg config.FolderConfiguration) {
	m.folderCfgs[cfg.ID] = cfg
	folderFs := cfg.Filesystem()
	m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, folderFs, m.db)

	ignores := ignore.New(folderFs, ignore.WithCache(m.cacheIgnoredFiles))
	if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
		l.Warnln("Loading ignores:", err)
	}
	m.folderIgnores[cfg.ID] = ignores
}
2019-02-26 09:09:25 +01:00
// RemoveFolder tears down a folder entirely: it removes the folder
// marker, stops the folder's services and closes related connections
// (tearDownFolderLocked), and drops the folder from the database.
//
// NOTE: tearDownFolderLocked temporarily releases and re-acquires both
// mutexes while waiting for the services to stop; the defers here then
// release them (LIFO: pmut before fmut, the reverse of acquisition).
func (m *model) RemoveFolder(cfg config.FolderConfiguration) {
	m.fmut.Lock()
	m.pmut.Lock()
	defer m.fmut.Unlock()
	defer m.pmut.Unlock()

	// Delete syncthing specific files
	cfg.Filesystem().RemoveAll(config.DefaultMarkerName)

	m.tearDownFolderLocked(cfg, fmt.Errorf("removing folder %v", cfg.Description()))
	// Remove it from the database
	db.DropFolder(m.db, cfg.ID)
}
2019-02-26 09:09:25 +01:00
// tearDownFolderLocked dismantles a running folder: closes connections
// to sharing devices (with err as the reason), stops the folder's
// services and waits for them, then deletes the folder's in-memory
// state. Must be called with fmut and pmut held.
//
// CAUTION: both mutexes are released while waiting for the services to
// stop and re-acquired afterwards (the services may themselves need the
// locks while shutting down), so other model state may change in the
// interim.
func (m *model) tearDownFolderLocked(cfg config.FolderConfiguration, err error) {
	// Close connections to affected devices
	// Must happen before stopping the folder service to abort ongoing
	// transmissions and thus allow timely service termination.
	for _, dev := range cfg.Devices {
		m.closeLocked(dev.DeviceID, err)
	}

	// Stop the services running for this folder and wait for them to finish
	// stopping to prevent races on restart.
	tokens := m.folderRunnerTokens[cfg.ID]
	m.pmut.Unlock()
	m.fmut.Unlock()
	for _, id := range tokens {
		m.RemoveAndWait(id, 0)
	}
	m.fmut.Lock()
	m.pmut.Lock()

	// Clean up our config maps
	delete(m.folderCfgs, cfg.ID)
	delete(m.folderFiles, cfg.ID)
	delete(m.folderIgnores, cfg.ID)
	delete(m.folderRunners, cfg.ID)
	delete(m.folderRunnerTokens, cfg.ID)
}
2015-11-13 13:30:52 +01:00
2019-02-26 09:09:25 +01:00
// RestartFolder tears down the folder as configured in from and, unless
// the new configuration is paused, builds it back up as configured in
// to. Pausing/unpausing are thus special cases of a restart. The folder
// IDs must match; the caller supplies both configs so this function can
// tell pause from unpause from plain restart.
func (m *model) RestartFolder(from, to config.FolderConfiguration) {
	if len(to.ID) == 0 {
		panic("bug: cannot restart empty folder ID")
	}
	if to.ID != from.ID {
		panic(fmt.Sprintf("bug: folder restart cannot change ID %q -> %q", from.ID, to.ID))
	}

	// This mutex protects the entirety of the restart operation, preventing
	// there from being more than one folder restart operation in progress
	// at any given time. The usual fmut/pmut stuff doesn't cover this,
	// because those locks are released while we are waiting for the folder
	// to shut down (and must be so because the folder might need them as
	// part of its operations before shutting down).
	restartMut := m.folderRestartMuts.Get(to.ID)
	restartMut.Lock()
	defer restartMut.Unlock()

	m.fmut.Lock()
	m.pmut.Lock()
	defer m.fmut.Unlock()
	defer m.pmut.Unlock()

	// Pick log/error wording depending on whether this is a pause,
	// unpause or plain restart.
	var infoMsg string
	var errMsg string
	switch {
	case to.Paused:
		infoMsg = "Paused"
		errMsg = "pausing"
	case from.Paused:
		infoMsg = "Unpaused"
		errMsg = "unpausing"
	default:
		infoMsg = "Restarted"
		errMsg = "restarting"
	}

	m.tearDownFolderLocked(from, fmt.Errorf("%v folder %v", errMsg, to.Description()))
	if !to.Paused {
		m.addFolderLocked(to)
		m.startFolderLocked(to)
	}
	l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type)
}
2019-02-26 09:09:25 +01:00
// UsageReportingStats returns aggregated statistics for the anonymous
// usage report of the given version. When preview is false, the global
// block counters are reset as they are read (reporting consumes them);
// preview mode leaves them untouched.
func (m *model) UsageReportingStats(version int, preview bool) map[string]interface{} {
	stats := make(map[string]interface{})
	if version >= 3 {
		// Block stats
		blockStatsMut.Lock()
		copyBlockStats := make(map[string]int)
		for k, v := range blockStats {
			copyBlockStats[k] = v
			if !preview {
				blockStats[k] = 0
			}
		}
		blockStatsMut.Unlock()
		stats["blockStats"] = copyBlockStats

		// Transport stats: count current connections per transport type.
		m.pmut.RLock()
		transportStats := make(map[string]int)
		for _, conn := range m.conn {
			transportStats[conn.Transport()]++
		}
		m.pmut.RUnlock()
		stats["transportStats"] = transportStats

		// Ignore stats: classify the ignore patterns across all folders.
		ignoreStats := map[string]int{
			"lines":           0,
			"inverts":         0,
			"folded":          0,
			"deletable":       0,
			"rooted":          0,
			"includes":        0,
			"escapedIncludes": 0,
			"doubleStars":     0,
			"stars":           0,
		}
		var seenPrefix [3]bool
		for folder := range m.cfg.Folders() {
			lines, _, err := m.GetIgnores(folder)
			if err != nil {
				// Best effort: skip folders whose ignores can't be read.
				continue
			}
			ignoreStats["lines"] += len(lines)
			for _, line := range lines {
				// Allow prefixes to be specified in any order, but only once.
				for {
					if strings.HasPrefix(line, "!") && !seenPrefix[0] {
						seenPrefix[0] = true
						line = line[1:]
						ignoreStats["inverts"] += 1
					} else if strings.HasPrefix(line, "(?i)") && !seenPrefix[1] {
						seenPrefix[1] = true
						line = line[4:]
						ignoreStats["folded"] += 1
					} else if strings.HasPrefix(line, "(?d)") && !seenPrefix[2] {
						seenPrefix[2] = true
						line = line[4:]
						ignoreStats["deletable"] += 1
					} else {
						// No further prefixes; reset the flags for the next
						// line and fall through to pattern classification.
						seenPrefix[0] = false
						seenPrefix[1] = false
						seenPrefix[2] = false
						break
					}
				}
				// Noops, remove
				line = strings.TrimSuffix(line, "**")
				line = strings.TrimPrefix(line, "**/")
				if strings.HasPrefix(line, "/") {
					ignoreStats["rooted"] += 1
				} else if strings.HasPrefix(line, "#include ") {
					ignoreStats["includes"] += 1
					if strings.Contains(line, "..") {
						ignoreStats["escapedIncludes"] += 1
					}
				}
				if strings.Contains(line, "**") {
					ignoreStats["doubleStars"] += 1
					// Remove not to trip up star checks.
					line = strings.Replace(line, "**", "", -1)
				}
				if strings.Contains(line, "*") {
					ignoreStats["stars"] += 1
				}
			}
		}
		stats["ignoreStats"] = ignoreStats
	}
	return stats
}
2014-01-05 23:54:57 +01:00
// ConnectionInfo describes a connection to a device as reported through
// the API: transfer statistics plus connection metadata.
type ConnectionInfo struct {
	protocol.Statistics
	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          string
	Crypto        string
}

// MarshalJSON flattens the embedded Statistics fields and the local
// fields into a single JSON object with lowerCamelCase keys.
func (info ConnectionInfo) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"at":            info.At,
		"inBytesTotal":  info.InBytesTotal,
		"outBytesTotal": info.OutBytesTotal,
		"connected":     info.Connected,
		"paused":        info.Paused,
		"address":       info.Address,
		"clientVersion": info.ClientVersion,
		"type":          info.Type,
		"crypto":        info.Crypto,
	})
}
2015-11-09 23:48:58 +01:00
// ConnectionStats returns a map with connection statistics for each device.
// The result contains a "connections" map keyed by device ID string, and a
// "total" entry with the aggregated transfer counters. Devices without an
// active connection still appear, with only version/paused info filled in.
func (m *model) ConnectionStats() map[string]interface{} {
	// Lock order: fmut before pmut, as elsewhere in the model.
	m.fmut.RLock()
	m.pmut.RLock()

	res := make(map[string]interface{})
	devs := m.cfg.Devices()
	conns := make(map[string]ConnectionInfo, len(devs))
	for device, deviceCfg := range devs {
		hello := m.helloMessages[device]
		versionString := hello.ClientVersion
		// Prepend the client name when it's not stock syncthing.
		if hello.ClientName != "syncthing" {
			versionString = hello.ClientName + " " + hello.ClientVersion
		}
		ci := ConnectionInfo{
			ClientVersion: strings.TrimSpace(versionString),
			Paused:        deviceCfg.Paused,
		}
		if conn, ok := m.conn[device]; ok {
			ci.Type = conn.Type()
			ci.Crypto = conn.Crypto()
			ci.Connected = ok // always true within this branch
			ci.Statistics = conn.Statistics()
			if addr := conn.RemoteAddr(); addr != nil {
				ci.Address = addr.String()
			}
		}

		conns[device.String()] = ci
	}

	res["connections"] = conns
	m.pmut.RUnlock()
	m.fmut.RUnlock()

	// Totals across all connections, taken without holding the locks.
	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}
2015-04-28 22:32:10 +02:00
// DeviceStatistics returns statistics about each device
2019-02-26 09:09:25 +01:00
func ( m * model ) DeviceStatistics ( ) map [ string ] stats . DeviceStatistics {
2016-12-06 08:54:04 +00:00
res := make ( map [ string ] stats . DeviceStatistics )
2014-10-06 09:25:45 +02:00
for id := range m . cfg . Devices ( ) {
res [ id . String ( ) ] = m . deviceStatRef ( id ) . GetStatistics ( )
2014-08-21 23:45:40 +01:00
}
return res
}
2015-04-28 22:32:10 +02:00
// FolderStatistics returns statistics about each folder
2019-02-26 09:09:25 +01:00
func ( m * model ) FolderStatistics ( ) map [ string ] stats . FolderStatistics {
2016-12-06 08:54:04 +00:00
res := make ( map [ string ] stats . FolderStatistics )
2019-03-11 17:57:21 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
for id , runner := range m . folderRunners {
res [ id ] = runner . GetStatistics ( )
2014-12-07 20:21:12 +00:00
}
return res
}
2016-08-12 06:41:43 +00:00
// FolderCompletion describes how complete a device's copy of a folder is:
// the completion percentage plus the raw need/global counters behind it.
type FolderCompletion struct {
	CompletionPct float64
	NeedBytes     int64
	NeedItems     int64
	GlobalBytes   int64
	NeedDeletes   int64
}

// Map returns the members as a map, e.g. used in api to serialize as Json.
func (comp FolderCompletion) Map() map[string]interface{} {
	out := make(map[string]interface{}, 5)
	out["completion"] = comp.CompletionPct
	out["needBytes"] = comp.NeedBytes
	out["needItems"] = comp.NeedItems
	out["globalBytes"] = comp.GlobalBytes
	out["needDeletes"] = comp.NeedDeletes
	return out
}
2015-04-28 22:32:10 +02:00
// Completion returns the completion status, in percent, for the given device
// and folder.
func (m *model) Completion(device protocol.DeviceID, folder string) FolderCompletion {
	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it
	}

	tot := rf.GlobalSize().Bytes
	if tot == 0 {
		// Folder is empty, so we have all of it
		return FolderCompletion{
			CompletionPct: 100,
		}
	}

	// Partial-download progress for the device. NOTE(review): if the
	// device is not in deviceDownloads this indexes a nil map entry;
	// presumably GetBlockCounts handles a nil receiver — confirm.
	m.pmut.RLock()
	counts := m.deviceDownloads[device].GetBlockCounts(folder)
	m.pmut.RUnlock()

	var need, items, fileNeed, downloaded, deletes int64
	rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
		ft := f.(db.FileInfoTruncated)

		// If the file is deleted, we account it only in the deleted column.
		if ft.Deleted {
			deletes++
			return true
		}

		// This might be more than it really is, because some blocks can be of a smaller size.
		downloaded = int64(counts[ft.Name] * int(ft.BlockSize()))

		// Clamp at zero: the estimate above may exceed the file size.
		fileNeed = ft.FileSize() - downloaded
		if fileNeed < 0 {
			fileNeed = 0
		}
		need += fileNeed
		items++

		return true
	})

	needRatio := float64(need) / float64(tot)
	completionPct := 100 * (1 - needRatio)

	// If the completion is 100% but there are deletes we need to handle,
	// drop it down a notch. Hack for consumers that look only at the
	// percentage (our own GUI does the same calculation as here on its own
	// and needs the same fixup).
	if need == 0 && deletes > 0 {
		completionPct = 95 // chosen by fair dice roll
	}

	l.Debugf("%v Completion(%s, %q): %f (%d / %d = %f)", m, device, folder, completionPct, need, tot, needRatio)

	return FolderCompletion{
		CompletionPct: completionPct,
		NeedBytes:     need,
		NeedItems:     items,
		GlobalBytes:   tot,
		NeedDeletes:   deletes,
	}
}
2016-10-17 14:10:17 +02:00
func addSizeOfFile ( s * db . Counts , f db . FileIntf ) {
switch {
case f . IsDeleted ( ) :
s . Deleted ++
case f . IsDirectory ( ) :
s . Directories ++
case f . IsSymlink ( ) :
s . Symlinks ++
default :
s . Files ++
2013-12-30 09:30:29 -05:00
}
2016-10-17 14:10:17 +02:00
s . Bytes += f . FileSize ( )
2014-01-05 16:16:37 +01:00
}
2013-12-30 09:30:29 -05:00
2014-03-28 14:36:57 +01:00
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
2019-02-26 09:09:25 +01:00
func ( m * model ) GlobalSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-10-17 14:10:17 +02:00
return rf . GlobalSize ( )
2014-03-29 18:53:48 +01:00
}
2016-10-17 14:10:17 +02:00
return db . Counts { }
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
// LocalSize returns the number of files, deleted files and total bytes for all
2014-09-28 12:00:38 +01:00
// files in the local folder.
2019-02-26 09:09:25 +01:00
func ( m * model ) LocalSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-10-17 14:10:17 +02:00
return rf . LocalSize ( )
2014-03-29 18:53:48 +01:00
}
2016-10-17 14:10:17 +02:00
return db . Counts { }
2014-01-06 06:38:01 +01:00
}
2018-07-12 11:15:57 +03:00
// ReceiveOnlyChangedSize returns the number of files, deleted files and
// total bytes for all files that have changed locally in a receieve only
// folder.
2019-02-26 09:09:25 +01:00
func ( m * model ) ReceiveOnlyChangedSize ( folder string ) db . Counts {
2018-07-12 11:15:57 +03:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
if rf , ok := m . folderFiles [ folder ] ; ok {
return rf . ReceiveOnlyChangedSize ( )
}
return db . Counts { }
}
2014-05-19 22:31:28 +02:00
// NeedSize returns the number and total size of currently needed files.
2019-02-26 09:09:25 +01:00
func ( m * model ) NeedSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2016-10-17 14:10:17 +02:00
var result db . Counts
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-08-05 07:13:52 +00:00
cfg := m . folderCfgs [ folder ]
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
2017-11-11 19:18:17 +00:00
if cfg . IgnoreDelete && f . IsDeleted ( ) {
2016-08-05 07:13:52 +00:00
return true
}
2016-10-17 14:10:17 +02:00
addSizeOfFile ( & result , f )
2014-07-15 17:54:00 +02:00
return true
} )
}
2016-10-17 14:10:17 +02:00
result . Bytes -= m . progressEmitter . BytesCompleted ( folder )
l . Debugf ( "%v NeedSize(%q): %v" , m , folder , result )
return result
2013-12-23 12:12:44 -05:00
}
2015-04-28 22:32:10 +02:00
// NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
//
// Pagination spans all three lists in order (progress, queued, rest): the
// skip/get counters are threaded through getChunk for the first two and then
// consumed by the database iteration for the remainder.
func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	rf, ok := m.folderFiles[folder]
	if !ok {
		// Unknown folder.
		return nil, nil, nil
	}

	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	// skip is how many leading items the requested page passes over; get is
	// how many items remain to be collected for this page.
	skip := (page - 1) * perpage
	get := perpage

	runner, ok := m.folderRunners[folder]
	if ok {
		allProgressNames, allQueuedNames := runner.Jobs()
		var progressNames, queuedNames []string
		// Each getChunk call consumes part of skip/get and returns the slice
		// of names that falls on this page.
		progressNames, skip, get = getChunk(allProgressNames, skip, get)
		queuedNames, skip, get = getChunk(allQueuedNames, skip, get)
		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		// seen records names already returned via progress/queued so the
		// "rest" iteration below does not duplicate them.
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))
		for i, name := range progressNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}
		for i, name := range queuedNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}
	}
	rest = make([]db.FileInfoTruncated, 0, perpage)
	cfg := m.folderCfgs[folder]
	rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
		// Deletions may be configured to not count as needed.
		if cfg.IgnoreDelete && f.IsDeleted() {
			return true
		}
		if skip > 0 {
			skip--
			return true
		}
		ft := f.(db.FileInfoTruncated)
		if _, ok := seen[ft.Name]; !ok {
			rest = append(rest, ft)
			get--
		}
		// Stop iterating once the page is full.
		return get > 0
	})
	return progress, queued, rest
}
2018-12-11 09:59:04 +01:00
// LocalChangedFiles returns a paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
2019-02-26 09:09:25 +01:00
func ( m * model ) LocalChangedFiles ( folder string , page , perpage int ) [ ] db . FileInfoTruncated {
2018-12-11 09:59:04 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
rf , ok := m . folderFiles [ folder ]
if ! ok {
return nil
}
fcfg := m . folderCfgs [ folder ]
if fcfg . Type != config . FolderTypeReceiveOnly {
return nil
}
if rf . ReceiveOnlyChangedSize ( ) . TotalItems ( ) == 0 {
return nil
}
files := make ( [ ] db . FileInfoTruncated , 0 , perpage )
skip := ( page - 1 ) * perpage
get := perpage
rf . WithHaveTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
if ! f . IsReceiveOnlyChanged ( ) {
return true
}
if skip > 0 {
skip --
return true
}
ft := f . ( db . FileInfoTruncated )
files = append ( files , ft )
get --
return get > 0
} )
return files
}
2017-12-15 20:01:56 +00:00
// RemoteNeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
2019-02-26 09:09:25 +01:00
func ( m * model ) RemoteNeedFolderFiles ( device protocol . DeviceID , folder string , page , perpage int ) ( [ ] db . FileInfoTruncated , error ) {
2017-12-15 20:01:56 +00:00
m . fmut . RLock ( )
m . pmut . RLock ( )
2019-04-21 14:21:36 +02:00
err := m . checkDeviceFolderConnectedLocked ( device , folder )
2017-12-15 20:01:56 +00:00
rf := m . folderFiles [ folder ]
m . pmut . RUnlock ( )
m . fmut . RUnlock ( )
2019-04-21 14:21:36 +02:00
if err != nil {
return nil , err
}
2017-12-15 20:01:56 +00:00
files := make ( [ ] db . FileInfoTruncated , 0 , perpage )
skip := ( page - 1 ) * perpage
get := perpage
rf . WithNeedTruncated ( device , func ( f db . FileIntf ) bool {
if skip > 0 {
skip --
return true
}
2018-12-11 09:59:04 +01:00
files = append ( files , f . ( db . FileInfoTruncated ) )
get --
2017-12-15 20:01:56 +00:00
return get > 0
} )
return files , nil
2014-04-01 23:18:32 +02:00
}
2014-09-28 12:00:38 +01:00
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	// update == false: a full index replaces previous knowledge of the device.
	m.handleIndex(deviceID, folder, fs, false)
}
2014-09-28 12:00:38 +01:00
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	// update == true: merge into what we already know about the device.
	m.handleIndex(deviceID, folder, fs, true)
}
2019-02-26 09:09:25 +01:00
// handleIndex is the common implementation behind Index and IndexUpdate.
// It validates that the folder is shared with the device and not paused,
// updates the file set and download-progress tracking, and emits a
// RemoteIndexUpdated event. update selects full-replace (false) versus
// incremental (true) semantics.
func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo, update bool) {
	op := "Index"
	if update {
		op += " update"
	}

	l.Debugf("%v (in): %s / %q: %d files", op, deviceID, folder, len(fs))

	if cfg, ok := m.cfg.Folder(folder); !ok || !cfg.SharedWith(deviceID) {
		l.Infof("%v for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", op, folder, deviceID)
		return
	} else if cfg.Paused {
		l.Debugf("%v for paused folder (ID %q) sent from device %q.", op, folder, deviceID)
		return
	}

	m.fmut.RLock()
	files, existing := m.folderFiles[folder]
	runner, running := m.folderRunners[folder]
	m.fmut.RUnlock()

	if !existing {
		// The config said the folder is shared and unpaused, so its file set
		// must exist; anything else is a programming error.
		panic(fmt.Sprintf("%v for nonexistent folder %q", op, folder))
	}

	if running {
		// Schedule a pull after the update has been applied, on return.
		defer runner.SchedulePull()
	} else if update {
		// Runner may legitimately not be set if this is the "cleanup" Index
		// message at startup.
		panic(fmt.Sprintf("%v for not running folder %q", op, folder))
	}

	m.pmut.RLock()
	m.deviceDownloads[deviceID].Update(folder, makeForgetUpdate(fs))
	m.pmut.RUnlock()

	if !update {
		// A full index replaces whatever we previously knew about the device.
		files.Drop(deviceID)
	}
	for i := range fs {
		// The local flags should never be transmitted over the wire. Make
		// sure they look like they weren't.
		fs[i].LocalFlags = 0
	}
	files.Update(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.Sequence(deviceID),
	})
}
2019-02-26 09:09:25 +01:00
// ClusterConfig handles a ClusterConfig message received from deviceID.
// It reconciles the peer's announced folders against our configuration
// (rejecting, auto-accepting, or pausing as appropriate), negotiates delta
// index sequence numbers per shared folder, kicks off index sending, and
// applies introducer additions/removals. Implements the protocol.Model
// interface.
func (m *model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfig) {
	// Check the peer device's announced folders against our own. Emits events
	// for folders that we don't expect (unknown or not shared).
	// Also, collect a list of folders we do share, and if he's interested in
	// temporary indexes, subscribe the connection.

	tempIndexFolders := make([]string, 0, len(cm.Folders))

	m.pmut.RLock()
	conn, ok := m.conn[deviceID]
	hello := m.helloMessages[deviceID]
	m.pmut.RUnlock()
	if !ok {
		panic("bug: ClusterConfig called on closed or nonexistent connection")
	}

	changed := false
	deviceCfg := m.cfg.Devices()[deviceID]

	// See issue #3802 - in short, we can't send modern symlink entries to older
	// clients.
	dropSymlinks := false
	if hello.ClientName == m.clientName && upgrade.CompareVersions(hello.ClientVersion, "v0.14.14") < 0 {
		l.Warnln("Not sending symlinks to old client", deviceID, "- please upgrade to v0.14.14 or newer")
		dropSymlinks = true
	}

	// Needs to happen outside of the fmut, as can cause CommitConfiguration
	if deviceCfg.AutoAcceptFolders {
		for _, folder := range cm.Folders {
			changed = m.handleAutoAccepts(deviceCfg, folder) || changed
		}
	}

	m.fmut.Lock()
	var paused []string
	for _, folder := range cm.Folders {
		cfg, ok := m.cfg.Folder(folder.ID)
		if !ok || !cfg.SharedWith(deviceID) {
			// The peer announced a folder we don't have or don't share with
			// it: either silently ignore (if configured) or record it as
			// pending and emit a rejection event.
			if deviceCfg.IgnoredFolder(folder.ID) {
				l.Infof("Ignoring folder %s from device %s since we are configured to", folder.Description(), deviceID)
				continue
			}
			m.cfg.AddOrUpdatePendingFolder(folder.ID, folder.Label, deviceID)
			events.Default.Log(events.FolderRejected, map[string]string{
				"folder":      folder.ID,
				"folderLabel": folder.Label,
				"device":      deviceID.String(),
			})
			l.Infof("Unexpected folder %s sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder.Description(), deviceID)
			continue
		}
		if folder.Paused {
			// Paused on the remote side; remember so we don't expect data.
			paused = append(paused, folder.ID)
			continue
		}
		if cfg.Paused {
			// Paused on our side; nothing further to set up.
			continue
		}
		fs, ok := m.folderFiles[folder.ID]
		if !ok {
			// Shouldn't happen because !cfg.Paused, but might happen
			// if the folder is about to be unpaused, but not yet.
			continue
		}

		if !folder.DisableTempIndexes {
			tempIndexFolders = append(tempIndexFolders, folder.ID)
		}

		myIndexID := fs.IndexID(protocol.LocalDeviceID)
		mySequence := fs.Sequence(protocol.LocalDeviceID)
		// startSequence == 0 means a full index send; a nonzero value means
		// a delta send starting after that sequence number.
		var startSequence int64

		for _, dev := range folder.Devices {
			if dev.ID == m.id {
				// This is the other side's description of what it knows
				// about us. Lets check to see if we can start sending index
				// updates directly or need to send the index from start...

				if dev.IndexID == myIndexID {
					// They say they've seen our index ID before, so we can
					// send a delta update only.

					if dev.MaxSequence > mySequence {
						// Safety check. They claim to have more or newer
						// index data than we have - either we have lost
						// index data, or reset the index without resetting
						// the IndexID, or something else weird has
						// happened. We send a full index to reset the
						// situation.
						l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", deviceID, folder.Description())
						startSequence = 0
						continue
					}

					l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", deviceID, folder.Description(), dev.MaxSequence)
					startSequence = dev.MaxSequence
				} else if dev.IndexID != 0 {
					// They say they've seen an index ID from us, but it's
					// not the right one. Either they are confused or we
					// must have reset our database since last talking to
					// them. We'll start with a full index transfer.
					l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", deviceID, folder.Description(), dev.IndexID, myIndexID)
					startSequence = 0
				}
			} else if dev.ID == deviceID && dev.IndexID != 0 {
				// This is the other side's description of themselves. We
				// check to see that it matches the IndexID we have on file,
				// otherwise we drop our old index data and expect to get a
				// completely new set.

				theirIndexID := fs.IndexID(deviceID)
				if dev.IndexID == 0 {
					// NOTE(review): this branch is unreachable — the
					// enclosing condition requires dev.IndexID != 0.
					// Kept byte-for-byte; consider removing.
					// They're not announcing an index ID. This means they
					// do not support delta indexes and we should clear any
					// information we have from them before accepting their
					// index, which will presumably be a full index.
					fs.Drop(deviceID)
				} else if dev.IndexID != theirIndexID {
					// The index ID we have on file is not what they're
					// announcing. They must have reset their database and
					// will probably send us a full index. We drop any
					// information we have and remember this new index ID
					// instead.
					l.Infof("Device %v folder %s has a new index ID (%v)", deviceID, folder.Description(), dev.IndexID)
					fs.Drop(deviceID)
					fs.SetIndexID(deviceID, dev.IndexID)
				} else {
					// They're sending a recognized index ID and will most
					// likely use delta indexes. We might already have files
					// that we need to pull so let the folder runner know
					// that it should recheck the index data.
					if runner := m.folderRunners[folder.ID]; runner != nil {
						defer runner.SchedulePull()
					}
				}
			}
		}

		go sendIndexes(conn, folder.ID, fs, startSequence, dropSymlinks)
	}

	m.pmut.Lock()
	m.remotePausedFolders[deviceID] = paused
	m.pmut.Unlock()

	// This breaks if we send multiple CM messages during the same connection.
	if len(tempIndexFolders) > 0 {
		m.pmut.RLock()
		conn, ok := m.conn[deviceID]
		m.pmut.RUnlock()
		// In case we've got ClusterConfig, and the connection disappeared
		// from infront of our nose.
		if ok {
			m.progressEmitter.temporaryIndexSubscribe(conn, tempIndexFolders)
		}
	}

	if deviceCfg.Introducer {
		foldersDevices, introduced := m.handleIntroductions(deviceCfg, cm)
		if introduced {
			changed = true
		}
		// If permitted, check if the introducer has unshare devices/folders with
		// some of the devices/folders that we know were introduced to us by him.
		if !deviceCfg.SkipIntroductionRemovals && m.handleDeintroductions(deviceCfg, cm, foldersDevices) {
			changed = true
		}
	}
	m.fmut.Unlock()

	if changed {
		if err := m.cfg.Save(); err != nil {
			l.Warnln("Failed to save config", err)
		}
	}
}
2014-09-23 16:04:20 +02:00
2016-11-07 16:40:48 +00:00
// handleIntroductions handles adding devices/shares that are shared by an introducer device.
// It returns the set of (device, folder) pairs the introducer announced, plus
// whether the configuration was changed (callers are expected to save it).
func (m *model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig) (folderDeviceSet, bool) {
	// This device is an introducer. Go through the announced lists of folders
	// and devices and add what we are missing, remove what we have extra that
	// has been introducer by the introducer.
	changed := false

	foldersDevices := make(folderDeviceSet)

	for _, folder := range cm.Folders {
		// Adds devices which we do not have, but the introducer has
		// for the folders that we have in common. Also, shares folders
		// with devices that we have in common, yet are currently not sharing
		// the folder.

		fcfg, ok := m.cfg.Folder(folder.ID)
		if !ok {
			// Don't have this folder, carry on.
			continue
		}

		for _, device := range folder.Devices {
			// No need to share with self.
			if device.ID == m.id {
				continue
			}

			// Record the pair even if no change is made; the caller uses the
			// full announced set for de-introduction decisions.
			foldersDevices.set(device.ID, folder.ID)

			if _, ok := m.cfg.Devices()[device.ID]; !ok {
				// The device is currently unknown. Add it to the config.
				m.introduceDevice(device, introducerCfg)
			} else if fcfg.SharedWith(device.ID) {
				// We already share the folder with this device, so
				// nothing to do.
				continue
			}

			// We don't yet share this folder with this device. Add the device
			// to sharing list of the folder.
			l.Infof("Sharing folder %s with %v (vouched for by introducer %v)", folder.Description(), device.ID, introducerCfg.DeviceID)
			fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{
				DeviceID:     device.ID,
				IntroducedBy: introducerCfg.DeviceID,
			})
			changed = true
		}

		// NOTE(review): changed is cumulative across the folder loop, so once
		// any folder was modified, SetFolder is also called for subsequent
		// unmodified folders. Harmless (it rewrites the same config) but a
		// per-folder flag would be tighter — confirm before changing.
		if changed {
			m.cfg.SetFolder(fcfg)
		}
	}

	return foldersDevices, changed
}
2016-11-07 16:40:48 +00:00
2017-12-07 07:08:24 +00:00
// handleDeintroductions handles removals of devices/shares that are removed by an introducer device.
// foldersDevices is the set of (device, folder) pairs the introducer still
// announces; anything we attribute to this introducer that is absent from the
// set is unshared or removed. Returns whether the configuration was changed
// (and, if so, already replaced via m.cfg.Replace).
func (m *model) handleDeintroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig, foldersDevices folderDeviceSet) bool {
	changed := false
	// Devices that appear in any folder with a different (or no) introducer;
	// these must never be removed outright by this introducer.
	devicesNotIntroduced := make(map[protocol.DeviceID]struct{})

	folders := m.cfg.FolderList()
	// Check if we should unshare some folders, if the introducer has unshared them.
	for i := range folders {
		for k := 0; k < len(folders[i].Devices); k++ {
			if folders[i].Devices[k].IntroducedBy != introducerCfg.DeviceID {
				devicesNotIntroduced[folders[i].Devices[k].DeviceID] = struct{}{}
				continue
			}
			if !foldersDevices.has(folders[i].Devices[k].DeviceID, folders[i].ID) {
				// We could not find that folder shared on the
				// introducer with the device that was introduced to us.
				// We should follow and unshare as well.
				l.Infof("Unsharing folder %s with %v as introducer %v no longer shares the folder with that device", folders[i].Description(), folders[i].Devices[k].DeviceID, folders[i].Devices[k].IntroducedBy)
				// In-place deletion from the slice; k-- compensates for the
				// shift so the next element is not skipped.
				folders[i].Devices = append(folders[i].Devices[:k], folders[i].Devices[k+1:]...)
				k--
				changed = true
			}
		}
	}

	// Check if we should remove some devices, if the introducer no longer
	// shares any folder with them. Yet do not remove if we share other
	// folders that haven't been introduced by the introducer.
	devMap := m.cfg.Devices()
	devices := make([]config.DeviceConfiguration, 0, len(devMap))
	for deviceID, device := range devMap {
		if device.IntroducedBy == introducerCfg.DeviceID {
			if !foldersDevices.hasDevice(deviceID) {
				if _, ok := devicesNotIntroduced[deviceID]; !ok {
					// The introducer no longer shares any folder with the
					// device, remove the device.
					l.Infof("Removing device %v as introducer %v no longer shares any folders with that device", deviceID, device.IntroducedBy)
					changed = true
					// Skip the append below, dropping the device.
					continue
				}
				l.Infof("Would have removed %v as %v no longer shares any folders, yet there are other folders that are shared with this device that haven't been introduced by this introducer.", deviceID, device.IntroducedBy)
			}
		}
		devices = append(devices, device)
	}

	if changed {
		cfg := m.cfg.RawCopy()
		cfg.Folders = folders
		cfg.Devices = devices
		m.cfg.Replace(cfg)
	}

	return changed
}
2017-12-07 07:08:24 +00:00
// handleAutoAccepts handles adding and sharing folders for devices that have
// AutoAcceptFolders set to true.
2019-02-26 09:09:25 +01:00
func ( m * model ) handleAutoAccepts ( deviceCfg config . DeviceConfiguration , folder protocol . Folder ) bool {
2018-01-03 07:42:25 +00:00
if cfg , ok := m . cfg . Folder ( folder . ID ) ; ! ok {
2017-12-07 07:08:24 +00:00
defaultPath := m . cfg . Options ( ) . DefaultFolderPath
defaultPathFs := fs . NewFilesystem ( fs . FilesystemTypeBasic , defaultPath )
2019-01-05 18:10:02 +01:00
pathAlternatives := [ ] string {
sanitizePath ( folder . Label ) ,
sanitizePath ( folder . ID ) ,
}
for _ , path := range pathAlternatives {
2017-12-07 07:08:24 +00:00
if _ , err := defaultPathFs . Lstat ( path ) ; ! fs . IsNotExist ( err ) {
continue
}
fcfg := config . NewFolderConfiguration ( m . id , folder . ID , folder . Label , fs . FilesystemTypeBasic , filepath . Join ( defaultPath , path ) )
2018-01-03 07:42:25 +00:00
fcfg . Devices = append ( fcfg . Devices , config . FolderDeviceConfiguration {
DeviceID : deviceCfg . DeviceID ,
} )
2017-12-07 07:08:24 +00:00
// Need to wait for the waiter, as this calls CommitConfiguration,
// which sets up the folder and as we return from this call,
// ClusterConfig starts poking at m.folderFiles and other things
// that might not exist until the config is committed.
w , _ := m . cfg . SetFolder ( fcfg )
w . Wait ( )
l . Infof ( "Auto-accepted %s folder %s at path %s" , deviceCfg . DeviceID , folder . Description ( ) , fcfg . Path )
return true
}
l . Infof ( "Failed to auto-accept folder %s from %s due to path conflict" , folder . Description ( ) , deviceCfg . DeviceID )
return false
2018-01-03 07:42:25 +00:00
} else {
for _ , device := range cfg . DeviceIDs ( ) {
if device == deviceCfg . DeviceID {
// Already shared nothing todo.
return false
}
}
cfg . Devices = append ( cfg . Devices , config . FolderDeviceConfiguration {
DeviceID : deviceCfg . DeviceID ,
} )
w , _ := m . cfg . SetFolder ( cfg )
w . Wait ( )
l . Infof ( "Shared %s with %s due to auto-accept" , folder . ID , deviceCfg . DeviceID )
return true
2017-12-07 07:08:24 +00:00
}
}
2019-02-26 09:09:25 +01:00
func ( m * model ) introduceDevice ( device protocol . Device , introducerCfg config . DeviceConfiguration ) {
2016-11-07 16:40:48 +00:00
addresses := [ ] string { "dynamic" }
for _ , addr := range device . Addresses {
if addr != "dynamic" {
addresses = append ( addresses , addr )
}
}
l . Infof ( "Adding device %v to config (vouched for by introducer %v)" , device . ID , introducerCfg . DeviceID )
newDeviceCfg := config . DeviceConfiguration {
DeviceID : device . ID ,
Name : device . Name ,
Compression : introducerCfg . Compression ,
Addresses : addresses ,
CertName : device . CertName ,
IntroducedBy : introducerCfg . DeviceID ,
}
// The introducers' introducers are also our introducers.
if device . Introducer {
l . Infof ( "Device %v is now also an introducer" , device . ID )
newDeviceCfg . Introducer = true
newDeviceCfg . SkipIntroductionRemovals = device . SkipIntroductionRemovals
2014-09-23 16:04:20 +02:00
}
2016-11-07 16:40:48 +00:00
2019-02-02 12:16:27 +01:00
m . cfg . SetDevice ( newDeviceCfg )
2016-11-07 16:40:48 +00:00
}
2016-08-10 09:37:32 +00:00
// Closed is called when a connection has been closed
func (m *model) Closed(conn protocol.Connection, err error) {
	device := conn.ID()

	m.pmut.Lock()
	// NOTE(review): this lookup deliberately shadows the conn parameter; the
	// tracked connection (not necessarily the argument) is what we
	// unsubscribe and whose Name we log below.
	conn, ok := m.conn[device]
	if ok {
		m.progressEmitter.temporaryIndexUnsubscribe(conn)
	}
	// Drop all per-connection state for this device.
	delete(m.conn, device)
	delete(m.connRequestLimiters, device)
	delete(m.helloMessages, device)
	delete(m.deviceDownloads, device)
	delete(m.remotePausedFolders, device)
	closed := m.closed[device]
	delete(m.closed, device)
	m.pmut.Unlock()

	l.Infof("Connection to %s at %s closed: %v", device, conn.Name(), err)
	events.Default.Log(events.DeviceDisconnected, map[string]string{
		"id":    device.String(),
		"error": err.Error(),
	})
	// Signal anyone waiting on this connection's closed channel.
	// NOTE(review): if the device was never registered, closed is nil and
	// close(nil) panics — presumably Closed is only invoked for registered
	// connections; confirm against the caller.
	close(closed)
}
2016-12-21 18:41:25 +00:00
// close will close the underlying connection for a given device
2019-02-26 09:09:25 +01:00
func ( m * model ) close ( device protocol . DeviceID , err error ) {
2016-12-21 18:41:25 +00:00
m . pmut . Lock ( )
2019-01-09 17:31:09 +01:00
m . closeLocked ( device , err )
2016-12-21 18:41:25 +00:00
m . pmut . Unlock ( )
}
// closeLocked will close the underlying connection for a given device
2019-02-26 09:09:25 +01:00
func ( m * model ) closeLocked ( device protocol . DeviceID , err error ) {
2016-12-21 18:41:25 +00:00
conn , ok := m . conn [ device ]
if ! ok {
// There is no connection to close
return
}
2019-01-09 17:31:09 +01:00
conn . Close ( err )
2016-12-21 18:41:25 +00:00
}
2018-11-13 08:53:55 +01:00
// Implements protocol.RequestResponse
type requestResponse struct {
	data   []byte        // response payload, backed by protocol.BufferPool
	closed chan struct{} // closed exactly once, when Close is first called
	once   stdsync.Once  // guards the close/put in Close
}

// newRequestResponse returns a response whose buffer of the given size is
// taken from the protocol-level buffer pool.
func newRequestResponse(size int) *requestResponse {
	return &requestResponse{
		data:   protocol.BufferPool.Get(size),
		closed: make(chan struct{}),
	}
}

// Data returns the backing byte slice. It must not be used after Close, as
// the buffer is returned to the pool then.
func (r *requestResponse) Data() []byte {
	return r.data
}

// Close returns the buffer to the pool and signals waiters. Safe to call
// multiple times; only the first call has any effect.
func (r *requestResponse) Close() {
	r.once.Do(func() {
		protocol.BufferPool.Put(r.data)
		close(r.closed)
	})
}

// Wait blocks until Close has been called.
func (r *requestResponse) Wait() {
	<-r.closed
}
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
2019-02-26 09:09:25 +01:00
func ( m * model ) Request ( deviceID protocol . DeviceID , folder , name string , size int32 , offset int64 , hash [ ] byte , weakHash uint32 , fromTemporary bool ) ( out protocol . RequestResponse , err error ) {
2018-11-13 08:53:55 +01:00
if size < 0 || offset < 0 {
return nil , protocol . ErrInvalid
2018-04-09 21:55:52 +02:00
}
2018-08-15 16:33:03 +02:00
m . fmut . RLock ( )
folderCfg , ok := m . folderCfgs [ folder ]
2018-04-09 21:55:52 +02:00
folderIgnores := m . folderIgnores [ folder ]
m . fmut . RUnlock ( )
2018-08-15 16:33:03 +02:00
if ! ok {
// The folder might be already unpaused in the config, but not yet
// in the model.
l . Debugf ( "Request from %s for file %s in unstarted folder %q" , deviceID , name , folder )
2019-04-10 11:47:24 +02:00
return nil , protocol . ErrGeneric
2018-11-13 08:53:55 +01:00
}
if ! folderCfg . SharedWith ( deviceID ) {
l . Warnf ( "Request from %s for file %s in unshared folder %q" , deviceID , name , folder )
2019-04-10 11:47:24 +02:00
return nil , protocol . ErrGeneric
2018-11-13 08:53:55 +01:00
}
if folderCfg . Paused {
l . Debugf ( "Request from %s for file %s in paused folder %q" , deviceID , name , folder )
2019-04-10 11:47:24 +02:00
return nil , protocol . ErrGeneric
2018-08-15 16:33:03 +02:00
}
2018-04-09 21:55:52 +02:00
2018-06-06 23:34:11 +02:00
// Make sure the path is valid and in canonical form
if name , err = fs . Canonicalize ( name ) ; err != nil {
l . Debugf ( "Request from %s in folder %q for invalid filename %s" , deviceID , folder , name )
2019-04-10 11:47:24 +02:00
return nil , protocol . ErrGeneric
2018-04-09 21:55:52 +02:00
}
2015-10-03 17:25:21 +02:00
if deviceID != protocol . LocalDeviceID {
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in): %s: %q / %q o=%d s=%d t=%v" , m , deviceID , folder , name , offset , size , fromTemporary )
2013-12-15 11:43:31 +01:00
}
2014-11-09 04:26:52 +00:00
2017-09-02 05:52:38 +00:00
if fs . IsInternal ( name ) {
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) for internal file: %s: %q / %q o=%d s=%d" , m , deviceID , folder , name , offset , size )
2019-04-10 11:47:24 +02:00
return nil , protocol . ErrInvalid
2016-12-01 14:00:11 +00:00
}
if folderIgnores . Match ( name ) . IsIgnored ( ) {
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d" , m , deviceID , folder , name , offset , size )
2019-04-10 11:47:24 +02:00
return nil , protocol . ErrInvalid
2015-10-13 22:59:31 +09:00
}
2018-11-13 08:53:55 +01:00
folderFs := folderCfg . Filesystem ( )
2017-08-19 14:36:56 +00:00
if err := osutil . TraversesSymlink ( folderFs , filepath . Dir ( name ) ) ; err != nil {
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d" , m , err , deviceID , folder , name , offset , size )
return nil , protocol . ErrNoSuchFile
}
// Restrict parallel requests by connection/device
m . pmut . RLock ( )
limiter := m . connRequestLimiters [ deviceID ]
m . pmut . RUnlock ( )
if limiter != nil {
limiter . take ( int ( size ) )
}
// The requestResponse releases the bytes to the limiter when its Close method is called.
res := newRequestResponse ( int ( size ) )
defer func ( ) {
// Close it ourselves if it isn't returned due to an error
if err != nil {
res . Close ( )
}
} ( )
if limiter != nil {
go func ( ) {
res . Wait ( )
limiter . give ( int ( size ) )
} ( )
2016-04-15 10:59:41 +00:00
}
2014-12-08 11:54:22 +00:00
2016-04-15 10:59:41 +00:00
// Only check temp files if the flag is set, and if we are set to advertise
// the temp indexes.
2016-07-04 10:40:29 +00:00
if fromTemporary && ! folderCfg . DisableTempIndexes {
2017-09-02 05:52:38 +00:00
tempFn := fs . TempName ( name )
2016-12-13 10:24:10 +00:00
2017-08-19 14:36:56 +00:00
if info , err := folderFs . Lstat ( tempFn ) ; err != nil || ! info . IsRegular ( ) {
2016-12-13 10:24:10 +00:00
// Reject reads for anything that doesn't exist or is something
// other than a regular file.
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) failed stating temp file (%v): %s: %q / %q o=%d s=%d" , m , err , deviceID , folder , name , offset , size )
return nil , protocol . ErrNoSuchFile
2016-12-13 10:24:10 +00:00
}
2018-11-13 08:53:55 +01:00
err := readOffsetIntoBuf ( folderFs , tempFn , offset , res . data )
if err == nil && scanner . Validate ( res . data , hash , weakHash ) {
return res , nil
2016-04-15 10:59:41 +00:00
}
// Fall through to reading from a non-temp file, just incase the temp
// file has finished downloading.
2013-12-15 11:43:31 +01:00
}
2017-08-19 14:36:56 +00:00
if info , err := folderFs . Lstat ( name ) ; err != nil || ! info . IsRegular ( ) {
2016-12-13 10:24:10 +00:00
// Reject reads for anything that doesn't exist or is something
// other than a regular file.
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) failed stating file (%v): %s: %q / %q o=%d s=%d" , m , err , deviceID , folder , name , offset , size )
return nil , protocol . ErrNoSuchFile
2016-12-13 10:24:10 +00:00
}
2018-11-13 08:53:55 +01:00
if err := readOffsetIntoBuf ( folderFs , name , offset , res . data ) ; fs . IsNotExist ( err ) {
l . Debugf ( "%v REQ(in) file doesn't exist: %s: %q / %q o=%d s=%d" , m , deviceID , folder , name , offset , size )
return nil , protocol . ErrNoSuchFile
2016-04-15 10:59:41 +00:00
} else if err != nil {
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) failed reading file (%v): %s: %q / %q o=%d s=%d" , m , err , deviceID , folder , name , offset , size )
return nil , protocol . ErrGeneric
2013-12-15 11:43:31 +01:00
}
2018-04-09 21:55:52 +02:00
2018-11-13 08:53:55 +01:00
if ! scanner . Validate ( res . data , hash , weakHash ) {
2019-04-12 15:21:07 +02:00
m . recheckFile ( deviceID , folderFs , folder , name , size , offset , hash )
2018-11-13 08:53:55 +01:00
l . Debugf ( "%v REQ(in) failed validating data (%v): %s: %q / %q o=%d s=%d" , m , err , deviceID , folder , name , offset , size )
return nil , protocol . ErrNoSuchFile
2018-05-05 09:24:44 +01:00
}
2018-11-13 08:53:55 +01:00
return res , nil
2013-12-15 11:43:31 +01:00
}
2019-04-12 15:21:07 +02:00
func ( m * model ) recheckFile ( deviceID protocol . DeviceID , folderFs fs . Filesystem , folder , name string , size int32 , offset int64 , hash [ ] byte ) {
2018-05-05 09:24:44 +01:00
cf , ok := m . CurrentFolderFile ( folder , name )
if ! ok {
l . Debugf ( "%v recheckFile: %s: %q / %q: no current file" , m , deviceID , folder , name )
return
}
if cf . IsDeleted ( ) || cf . IsInvalid ( ) || cf . IsSymlink ( ) || cf . IsDirectory ( ) {
l . Debugf ( "%v recheckFile: %s: %q / %q: not a regular file" , m , deviceID , folder , name )
return
}
2019-04-12 15:21:07 +02:00
blockIndex := int ( offset ) / cf . BlockSize ( )
2018-06-13 19:07:52 +02:00
if blockIndex >= len ( cf . Blocks ) {
2018-05-05 09:24:44 +01:00
l . Debugf ( "%v recheckFile: %s: %q / %q i=%d: block index too far" , m , deviceID , folder , name , blockIndex )
return
}
block := cf . Blocks [ blockIndex ]
// Seems to want a different version of the file, whatever.
if ! bytes . Equal ( block . Hash , hash ) {
l . Debugf ( "%v recheckFile: %s: %q / %q i=%d: hash mismatch %x != %x" , m , deviceID , folder , name , blockIndex , block . Hash , hash )
return
}
// The hashes provided part of the request match what we expect to find according
// to what we have in the database, yet the content we've read off the filesystem doesn't
// Something is fishy, invalidate the file and rescan it.
// The file will temporarily become invalid, which is ok as the content is messed up.
2019-04-07 13:29:17 +02:00
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
if ! ok {
l . Debugf ( "%v recheckFile: %s: %q / %q: Folder stopped before rescan could be scheduled" , m , deviceID , folder , name )
return
}
if err := runner . ForceRescan ( cf ) ; err != nil {
2018-05-05 09:24:44 +01:00
l . Debugf ( "%v recheckFile: %s: %q / %q rescan: %s" , m , deviceID , folder , name , err )
2019-04-07 13:29:17 +02:00
return
2018-05-05 09:24:44 +01:00
}
2019-04-07 13:29:17 +02:00
l . Debugf ( "%v recheckFile: %s: %q / %q" , m , deviceID , folder , name )
2018-05-05 09:24:44 +01:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) CurrentFolderFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
2016-12-06 08:54:04 +00:00
return fs . Get ( protocol . LocalDeviceID , file )
2014-04-01 23:18:32 +02:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) CurrentGlobalFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
2016-12-06 08:54:04 +00:00
return fs . GetGlobal ( file )
2014-04-01 23:18:32 +02:00
}
2018-02-24 08:51:29 +01:00
// Connection returns the current connection for device, and a boolean whether a connection was found.
2019-02-26 09:09:25 +01:00
func ( m * model ) Connection ( deviceID protocol . DeviceID ) ( connections . Connection , bool ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2017-11-21 07:25:38 +00:00
cn , ok := m . conn [ deviceID ]
2014-09-20 19:14:45 +02:00
m . pmut . RUnlock ( )
2014-09-10 11:29:01 +02:00
if ok {
2014-09-28 12:00:38 +01:00
m . deviceWasSeen ( deviceID )
2014-09-10 11:29:01 +02:00
}
2017-11-21 07:25:38 +00:00
return cn , ok
2014-01-06 11:11:18 +01:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) GetIgnores ( folder string ) ( [ ] string , [ ] string , error ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2017-08-12 17:10:43 +00:00
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
cfg , ok := m . folderCfgs [ folder ]
2017-08-22 06:48:25 +00:00
if ! ok {
cfg , ok = m . cfg . Folders ( ) [ folder ]
if ! ok {
return nil , nil , fmt . Errorf ( "Folder %s does not exist" , folder )
2017-04-01 09:58:06 +00:00
}
2017-08-22 06:48:25 +00:00
}
2014-09-14 23:03:53 +01:00
2018-03-18 01:42:31 +01:00
// On creation a new folder with ignore patterns validly has no marker yet.
if err := cfg . CheckPath ( ) ; err != nil && err != config . ErrMarkerMissing {
2017-08-22 06:48:25 +00:00
return nil , nil , err
}
2015-12-30 21:30:47 +00:00
2017-08-22 06:48:25 +00:00
ignores , ok := m . folderIgnores [ folder ]
2018-05-08 22:37:13 +01:00
if ! ok {
ignores = ignore . New ( fs . NewFilesystem ( cfg . FilesystemType , cfg . Path ) )
2014-09-14 23:03:53 +01:00
}
2017-08-22 06:48:25 +00:00
if err := ignores . Load ( ".stignore" ) ; err != nil && ! fs . IsNotExist ( err ) {
return nil , nil , err
2014-09-14 23:03:53 +01:00
}
2017-08-22 06:48:25 +00:00
return ignores . Lines ( ) , ignores . Patterns ( ) , nil
2014-09-14 23:03:53 +01:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) SetIgnores ( folder string , content [ ] string ) error {
2017-04-01 09:58:06 +00:00
cfg , ok := m . cfg . Folders ( ) [ folder ]
2014-09-14 23:03:53 +01:00
if ! ok {
2018-05-05 10:30:39 +02:00
return fmt . Errorf ( "folder %s does not exist" , cfg . Description ( ) )
}
err := cfg . CheckPath ( )
if err == config . ErrPathMissing {
if err = cfg . CreateRoot ( ) ; err != nil {
return fmt . Errorf ( "failed to create folder root: %v" , err )
}
err = cfg . CheckPath ( )
}
if err != nil && err != config . ErrMarkerMissing {
return err
2014-09-14 23:03:53 +01:00
}
2017-08-19 14:36:56 +00:00
if err := ignore . WriteIgnores ( cfg . Filesystem ( ) , ".stignore" , content ) ; err != nil {
2014-09-14 23:03:53 +01:00
l . Warnln ( "Saving .stignore:" , err )
return err
}
2017-04-01 09:58:06 +00:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ok {
return runner . Scan ( nil )
2014-09-14 23:03:53 +01:00
}
2017-04-01 09:58:06 +00:00
return nil
2014-09-14 23:03:53 +01:00
}
2016-03-25 20:29:07 +00:00
// OnHello is called when an device connects to us.
// This allows us to extract some information from the Hello message
// and add it to a list of known devices ahead of any checks.
2019-02-26 09:09:25 +01:00
func ( m * model ) OnHello ( remoteID protocol . DeviceID , addr net . Addr , hello protocol . HelloResult ) error {
2016-08-05 09:29:49 +00:00
if m . cfg . IgnoredDevice ( remoteID ) {
return errDeviceIgnored
}
2017-04-01 09:52:31 +00:00
cfg , ok := m . cfg . Device ( remoteID )
if ! ok {
2018-08-25 11:36:10 +01:00
m . cfg . AddOrUpdatePendingDevice ( remoteID , hello . DeviceName , addr . String ( ) )
2017-04-01 09:52:31 +00:00
events . Default . Log ( events . DeviceRejected , map [ string ] string {
"name" : hello . DeviceName ,
"device" : remoteID . String ( ) ,
"address" : addr . String ( ) ,
} )
return errDeviceUnknown
2016-03-25 20:29:07 +00:00
}
2016-08-05 09:29:49 +00:00
2017-04-01 09:52:31 +00:00
if cfg . Paused {
return errDevicePaused
}
2016-08-05 09:29:49 +00:00
2017-04-01 09:52:31 +00:00
if len ( cfg . AllowedNetworks ) > 0 {
if ! connections . IsAllowedNetwork ( addr . String ( ) , cfg . AllowedNetworks ) {
return errNetworkNotAllowed
}
}
return nil
2016-03-25 20:29:07 +00:00
}
// GetHello is called when we are about to connect to some remote device.
2019-02-26 09:09:25 +01:00
func ( m * model ) GetHello ( id protocol . DeviceID ) protocol . HelloIntf {
2017-05-22 19:58:33 +00:00
name := ""
if _ , ok := m . cfg . Device ( id ) ; ok {
name = m . cfg . MyName ( )
}
2016-07-04 10:40:29 +00:00
return & protocol . Hello {
2017-05-22 19:58:33 +00:00
DeviceName : name ,
2016-03-25 20:29:07 +00:00
ClientName : m . clientName ,
ClientVersion : m . clientVersion ,
}
}
2014-01-06 11:11:18 +01:00
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *model) AddConnection(conn connections.Connection, hello protocol.HelloResult) {
	deviceID := conn.ID()
	device, ok := m.cfg.Device(deviceID)
	if !ok {
		l.Infoln("Trying to add connection to unknown device")
		return
	}

	m.pmut.Lock()
	if oldConn, ok := m.conn[deviceID]; ok {
		l.Infoln("Replacing old connection", oldConn, "with", conn, "for", deviceID)
		// There is an existing connection to this device that we are
		// replacing. We must close the existing connection and wait for the
		// close to complete before adding the new connection. We do the
		// actual close without holding pmut as the connection will call
		// back into Closed() for the cleanup.
		closed := m.closed[deviceID]
		m.pmut.Unlock()
		oldConn.Close(errReplacingConnection)
		<-closed
		m.pmut.Lock()
	}

	// Register the connection and its supporting state under pmut.
	m.conn[deviceID] = conn
	m.closed[deviceID] = make(chan struct{})
	m.deviceDownloads[deviceID] = newDeviceDownloadState()
	// Per-device limit on outstanding request bytes.
	// 0: default, <0: no limiting
	switch {
	case device.MaxRequestKiB > 0:
		m.connRequestLimiters[deviceID] = newByteSemaphore(1024 * device.MaxRequestKiB)
	case device.MaxRequestKiB == 0:
		m.connRequestLimiters[deviceID] = newByteSemaphore(1024 * defaultPullerPendingKiB)
	}

	m.helloMessages[deviceID] = hello

	event := map[string]string{
		"id":            deviceID.String(),
		"deviceName":    hello.DeviceName,
		"clientName":    hello.ClientName,
		"clientVersion": hello.ClientVersion,
		"type":          conn.Type(),
	}

	addr := conn.RemoteAddr()
	if addr != nil {
		event["addr"] = addr.String()
	}

	events.Default.Log(events.DeviceConnected, event)

	l.Infof(`Device %s client is "%s %s" named "%s" at %s`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName, conn)

	conn.Start()
	m.pmut.Unlock()

	// Acquires fmut, so has to be done outside of pmut.
	cm := m.generateClusterConfig(deviceID)
	conn.ClusterConfig(cm)

	// Adopt the remote's self-reported name if we have none or are
	// configured to overwrite it.
	if (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" {
		device.Name = hello.DeviceName
		m.cfg.SetDevice(device)
		m.cfg.Save()
	}

	m.deviceWasSeen(deviceID)
}
2019-02-26 09:09:25 +01:00
func ( m * model ) DownloadProgress ( device protocol . DeviceID , folder string , updates [ ] protocol . FileDownloadProgressUpdate ) {
2016-04-15 10:59:41 +00:00
m . fmut . RLock ( )
cfg , ok := m . folderCfgs [ folder ]
m . fmut . RUnlock ( )
2018-08-21 19:49:35 +02:00
if ! ok || cfg . DisableTempIndexes || ! cfg . SharedWith ( device ) {
2016-04-15 10:59:41 +00:00
return
}
m . pmut . RLock ( )
m . deviceDownloads [ device ] . Update ( folder , updates )
2016-05-26 06:53:27 +00:00
state := m . deviceDownloads [ device ] . GetBlockCounts ( folder )
2016-04-15 10:59:41 +00:00
m . pmut . RUnlock ( )
2016-05-22 07:52:08 +00:00
events . Default . Log ( events . RemoteDownloadProgress , map [ string ] interface { } {
"device" : device . String ( ) ,
"folder" : folder ,
2016-05-26 06:53:27 +00:00
"state" : state ,
2016-05-22 07:52:08 +00:00
} )
2016-04-15 10:59:41 +00:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) deviceStatRef ( deviceID protocol . DeviceID ) * stats . DeviceStatisticsReference {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-09-20 19:14:45 +02:00
2014-09-28 12:00:38 +01:00
if sr , ok := m . deviceStatRefs [ deviceID ] ; ok {
2014-09-20 19:14:45 +02:00
return sr
}
2014-12-08 16:36:15 +01:00
2015-09-04 13:22:59 +02:00
sr := stats . NewDeviceStatisticsReference ( m . db , deviceID . String ( ) )
2014-12-08 16:36:15 +01:00
m . deviceStatRefs [ deviceID ] = sr
return sr
2014-09-20 19:14:45 +02:00
}
2019-02-26 09:09:25 +01:00
// deviceWasSeen records in the device's statistics that it was seen just now.
func (m *model) deviceWasSeen(deviceID protocol.DeviceID) {
	m.deviceStatRef(deviceID).WasSeen()
}
2019-03-10 17:05:39 +01:00
// sendIndexes sends an initial index for the folder to the connection and
// then keeps sending index updates as the local index changes, until an
// error occurs or the connection is closed. It is meant to run as a
// long-lived goroutine, one per connection and folder.
func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, prevSequence int64, dropSymlinks bool) {
	deviceID := conn.ID()
	var err error

	l.Debugf("Starting sendIndexes for %s to %s at %s (slv=%d)", folder, deviceID, conn, prevSequence)
	// err is captured by reference so the deferred log shows the loop's
	// terminating error, if any.
	defer l.Debugf("Exiting sendIndexes for %s to %s at %s: %v", folder, deviceID, conn, err)

	// We need to send one index, regardless of whether there is something to send or not
	prevSequence, err = sendIndexTo(prevSequence, conn, folder, fs, dropSymlinks)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := events.Default.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer events.Default.Unsubscribe(sub)

	for err == nil {
		if conn.Closed() {
			// Our work is done.
			return
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if fs.Sequence(protocol.LocalDeviceID) <= prevSequence {
			sub.Poll(time.Minute)
			continue
		}

		prevSequence, err = sendIndexTo(prevSequence, conn, folder, fs, dropSymlinks)

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}
}
2014-07-15 13:04:37 +02:00
2018-05-01 23:39:15 +02:00
// sendIndexTo sends file infos with a sequence number higher than prevSequence and
// returns the highest sent sequence number.
func sendIndexTo(prevSequence int64, conn protocol.Connection, folder string, fs *db.FileSet, dropSymlinks bool) (int64, error) {
	deviceID := conn.ID()
	// A prevSequence of zero means the peer has nothing; the very first
	// flush must be a full Index, all later ones are IndexUpdates.
	initial := prevSequence == 0

	batch := newFileInfoBatch(nil)
	batch.flushFn = func(fs []protocol.FileInfo) error {
		l.Debugf("Sending indexes for %s to %s at %s: %d files (<%d bytes)", folder, deviceID, conn, len(batch.infos), batch.size)
		if initial {
			initial = false
			return conn.Index(folder, fs)
		}
		return conn.IndexUpdate(folder, fs)
	}

	var err error
	var f protocol.FileInfo
	fs.WithHaveSequence(prevSequence+1, func(fi db.FileIntf) bool {
		// Stop iterating as soon as a flush fails; err carries the cause
		// out of the closure.
		if err = batch.flushIfFull(); err != nil {
			return false
		}

		// Sanity checks on sequence ordering, enabled only when debugging.
		if shouldDebug() {
			if fi.SequenceNo() < prevSequence+1 {
				panic(fmt.Sprintln("sequence lower than requested, got:", fi.SequenceNo(), ", asked to start at:", prevSequence+1))
			}
			if f.Sequence > 0 && fi.SequenceNo() <= f.Sequence {
				panic(fmt.Sprintln("non-increasing sequence, current:", fi.SequenceNo(), "<= previous:", f.Sequence))
			}
		}

		f = fi.(protocol.FileInfo)

		// Mark the file as invalid if any of the local bad stuff flags are set.
		f.RawInvalid = f.IsInvalid()

		// If the file is marked LocalReceive (i.e., changed locally on a
		// receive only folder) we do not want it to ever become the
		// globally best version, invalid or not.
		if f.IsReceiveOnlyChanged() {
			f.Version = protocol.Vector{}
		}

		f.LocalFlags = 0 // never sent externally

		if dropSymlinks && f.IsSymlink() {
			// Do not send index entries with symlinks to clients that can't
			// handle it. Fixes issue #3802. Once both sides are upgraded, a
			// rescan (i.e., change) of the symlink is required for it to
			// sync again, due to delta indexes.
			return true
		}

		batch.append(f)
		return true
	})
	if err != nil {
		return prevSequence, err
	}

	err = batch.flush()

	// True if there was nothing to be sent
	if f.Sequence == 0 {
		return prevSequence, err
	}

	return f.Sequence, err
}
2019-02-26 09:09:25 +01:00
func ( m * model ) requestGlobal ( deviceID protocol . DeviceID , folder , name string , offset int64 , size int , hash [ ] byte , weakHash uint32 , fromTemporary bool ) ( [ ] byte , error ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
nc , ok := m . conn [ deviceID ]
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-01-06 11:11:18 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return nil , fmt . Errorf ( "requestGlobal: no such device: %s" , deviceID )
2014-01-06 11:11:18 +01:00
}
2018-05-05 09:24:44 +01:00
l . Debugf ( "%v REQ(out): %s: %q / %q o=%d s=%d h=%x wh=%x ft=%t" , m , deviceID , folder , name , offset , size , hash , weakHash , fromTemporary )
2014-01-06 11:11:18 +01:00
2018-05-05 09:24:44 +01:00
return nc . Request ( folder , name , offset , size , hash , weakHash , fromTemporary )
2014-01-06 11:11:18 +01:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) ScanFolders ( ) map [ string ] error {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-13 05:12:01 +09:00
folders := make ( [ ] string , 0 , len ( m . folderCfgs ) )
2014-09-28 12:00:38 +01:00
for folder := range m . folderCfgs {
folders = append ( folders , folder )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
errors := make ( map [ string ] error , len ( m . folderCfgs ) )
2015-04-22 23:54:31 +01:00
errorsMut := sync . NewMutex ( )
2015-02-11 19:52:59 +01:00
2015-04-22 23:54:31 +01:00
wg := sync . NewWaitGroup ( )
2014-09-28 12:00:38 +01:00
wg . Add ( len ( folders ) )
for _ , folder := range folders {
folder := folder
2014-05-13 20:42:12 -03:00
go func ( ) {
2014-09-28 12:00:38 +01:00
err := m . ScanFolder ( folder )
2014-05-28 06:55:30 +02:00
if err != nil {
2015-02-11 19:52:59 +01:00
errorsMut . Lock ( )
errors [ folder ] = err
errorsMut . Unlock ( )
2015-04-13 05:12:01 +09:00
2015-03-28 14:25:42 +00:00
// Potentially sets the error twice, once in the scanner just
// by doing a check, and once here, if the error returned is
2017-10-24 07:58:55 +00:00
// the same one as returned by CheckHealth, though
2015-04-13 05:12:01 +09:00
// duplicate set is handled by setError.
m . fmut . RLock ( )
srv := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
srv . setError ( err )
2014-05-28 06:55:30 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Done ( )
} ( )
2014-04-14 09:58:17 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Wait ( )
2015-02-11 19:52:59 +01:00
return errors
2014-03-29 18:53:48 +01:00
}
2013-12-15 11:43:31 +01:00
2019-02-26 09:09:25 +01:00
// ScanFolder scans the given folder for changes, in its entirety.
func (m *model) ScanFolder(folder string) error {
	return m.ScanFolderSubdirs(folder, nil)
}
2019-02-26 09:09:25 +01:00
func ( m * model ) ScanFolderSubdirs ( folder string , subs [ ] string ) error {
2017-12-15 20:01:56 +00:00
m . fmut . RLock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
m . fmut . RUnlock ( )
return err
2015-06-20 19:26:25 +02:00
}
2017-12-15 20:01:56 +00:00
runner := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
2015-06-20 19:26:25 +02:00
return runner . Scan ( subs )
}
2019-02-26 09:09:25 +01:00
func ( m * model ) DelayScan ( folder string , next time . Duration ) {
2015-05-01 14:30:17 +02:00
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
if ! ok {
return
}
runner . DelayScan ( next )
}
2015-04-29 20:46:32 +02:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
2019-02-26 09:09:25 +01:00
func ( m * model ) numHashers ( folder string ) int {
2015-04-29 20:46:32 +02:00
m . fmut . Lock ( )
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
m . fmut . Unlock ( )
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 10:05:06 +02:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, lets try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 20:46:32 +02:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
2015-11-17 12:08:53 +01:00
// generateClusterConfig returns a ClusterConfigMessage that is correct for
// the given peer device
2019-02-26 09:09:25 +01:00
func ( m * model ) generateClusterConfig ( device protocol . DeviceID ) protocol . ClusterConfig {
2016-07-04 10:40:29 +00:00
var message protocol . ClusterConfig
2014-04-13 15:28:26 +02:00
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2018-05-08 09:19:34 +02:00
defer m . fmut . RUnlock ( )
2016-07-23 12:46:31 +00:00
2018-04-22 18:01:52 +02:00
for _ , folderCfg := range m . cfg . FolderList ( ) {
2018-06-06 23:34:11 +02:00
if ! folderCfg . SharedWith ( device ) {
2018-05-08 09:19:34 +02:00
continue
}
2016-03-11 09:48:46 +00:00
protocolFolder := protocol . Folder {
2018-04-22 18:01:52 +02:00
ID : folderCfg . ID ,
2016-07-04 10:40:29 +00:00
Label : folderCfg . Label ,
2016-12-16 22:23:35 +00:00
ReadOnly : folderCfg . Type == config . FolderTypeSendOnly ,
2016-07-04 10:40:29 +00:00
IgnorePermissions : folderCfg . IgnorePerms ,
IgnoreDelete : folderCfg . IgnoreDelete ,
DisableTempIndexes : folderCfg . DisableTempIndexes ,
2016-12-21 18:41:25 +00:00
Paused : folderCfg . Paused ,
2015-09-27 12:11:34 +01:00
}
2016-07-04 10:40:29 +00:00
2018-05-08 09:19:34 +02:00
var fs * db . FileSet
2018-04-22 18:01:52 +02:00
if ! folderCfg . Paused {
2018-05-08 09:19:34 +02:00
fs = m . folderFiles [ folderCfg . ID ]
2018-04-22 18:01:52 +02:00
}
2016-07-23 12:46:31 +00:00
2018-04-22 18:01:52 +02:00
for _ , device := range folderCfg . Devices {
deviceCfg , _ := m . cfg . Device ( device . DeviceID )
2016-07-23 12:46:31 +00:00
2016-03-11 09:48:46 +00:00
protocolDevice := protocol . Device {
2018-04-22 18:01:52 +02:00
ID : deviceCfg . DeviceID ,
2016-07-29 19:54:24 +00:00
Name : deviceCfg . Name ,
Addresses : deviceCfg . Addresses ,
Compression : deviceCfg . Compression ,
CertName : deviceCfg . CertName ,
Introducer : deviceCfg . Introducer ,
2018-04-22 18:01:52 +02:00
}
2018-05-08 09:19:34 +02:00
if fs != nil {
2018-04-22 18:01:52 +02:00
if deviceCfg . DeviceID == m . id {
protocolDevice . IndexID = fs . IndexID ( protocol . LocalDeviceID )
protocolDevice . MaxSequence = fs . Sequence ( protocol . LocalDeviceID )
} else {
protocolDevice . IndexID = fs . IndexID ( deviceCfg . DeviceID )
protocolDevice . MaxSequence = fs . Sequence ( deviceCfg . DeviceID )
}
2014-09-23 16:04:20 +02:00
}
2015-09-27 11:39:02 +01:00
2016-03-11 09:48:46 +00:00
protocolFolder . Devices = append ( protocolFolder . Devices , protocolDevice )
2014-01-09 13:58:35 +01:00
}
2018-04-22 18:01:52 +02:00
2016-03-11 09:48:46 +00:00
message . Folders = append ( message . Folders , protocolFolder )
2013-12-29 20:33:57 -05:00
}
2014-04-13 15:28:26 +02:00
2016-03-11 09:48:46 +00:00
return message
2013-12-29 20:33:57 -05:00
}
2014-04-14 09:58:17 +02:00
2019-02-26 09:09:25 +01:00
func ( m * model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 21:14:19 +01:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-13 05:12:01 +09:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 21:14:19 +01:00
}
2015-04-13 05:12:01 +09:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 09:58:17 +02:00
}
2014-06-16 10:47:02 +02:00
2019-02-26 09:09:25 +01:00
func ( m * model ) FolderErrors ( folder string ) ( [ ] FileError , error ) {
2018-01-14 17:01:06 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
return nil , err
}
2018-11-07 11:04:41 +01:00
return m . folderRunners [ folder ] . Errors ( ) , nil
2018-01-14 17:01:06 +00:00
}
2019-02-26 09:09:25 +01:00
func ( m * model ) WatchError ( folder string ) error {
2018-02-04 22:46:24 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
return err
}
return m . folderRunners [ folder ] . WatchError ( )
}
2019-02-26 09:09:25 +01:00
func ( m * model ) Override ( folder string ) {
2018-05-21 08:56:24 +02:00
// Grab the runner and the file set.
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2019-04-07 13:29:17 +02:00
runner , ok := m . folderRunners [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2019-04-07 13:29:17 +02:00
if ! ok {
2015-04-18 22:41:47 +09:00
return
}
2014-06-23 11:52:13 +02:00
2018-05-21 08:56:24 +02:00
// Run the override, taking updates as if they came from scanning.
2014-07-15 17:54:00 +02:00
2019-04-07 13:29:17 +02:00
runner . Override ( )
2014-06-16 10:47:02 +02:00
}
2014-06-20 00:27:54 +02:00
2019-02-26 09:09:25 +01:00
func ( m * model ) Revert ( folder string ) {
2018-07-12 11:15:57 +03:00
// Grab the runner and the file set.
m . fmut . RLock ( )
2019-04-07 13:29:17 +02:00
runner , ok := m . folderRunners [ folder ]
2018-07-12 11:15:57 +03:00
m . fmut . RUnlock ( )
2019-04-07 13:29:17 +02:00
if ! ok {
2018-07-12 11:15:57 +03:00
return
}
// Run the revert, taking updates as if they came from scanning.
2019-04-07 13:29:17 +02:00
runner . Revert ( )
2018-07-12 11:15:57 +03:00
}
2016-07-29 19:54:24 +00:00
// CurrentSequence returns the change version for the given folder.
2014-09-28 12:00:38 +01:00
// This is guaranteed to increment if the contents of the local folder has
2014-09-27 14:44:15 +02:00
// changed.
2019-02-26 09:09:25 +01:00
func ( m * model ) CurrentSequence ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
2014-10-12 10:36:04 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-09-27 14:44:15 +02:00
}
2016-07-29 19:54:24 +00:00
return fs . Sequence ( protocol . LocalDeviceID ) , true
2014-09-27 14:44:15 +02:00
}
2016-07-29 19:54:24 +00:00
// RemoteSequence returns the change version for the given folder, as
2014-09-27 14:44:15 +02:00
// sent by remote peers. This is guaranteed to increment if the contents of
2014-09-28 12:00:38 +01:00
// the remote or global folder has changed.
2019-02-26 09:09:25 +01:00
func ( m * model ) RemoteSequence ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-07-15 17:54:00 +02:00
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2018-06-06 23:34:11 +02:00
cfg := m . folderCfgs [ folder ]
2014-07-15 17:54:00 +02:00
if ! ok {
2014-10-24 14:54:36 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-07-15 17:54:00 +02:00
}
2015-01-18 02:12:06 +01:00
var ver int64
2018-06-06 23:34:11 +02:00
for _ , device := range cfg . Devices {
ver += fs . Sequence ( device . DeviceID )
2014-06-20 00:27:54 +02:00
}
2015-06-24 08:52:38 +01:00
return ver , true
2014-06-20 00:27:54 +02:00
}
2014-09-27 14:44:15 +02:00
2019-02-26 09:09:25 +01:00
func ( m * model ) GlobalDirectoryTree ( folder , prefix string , levels int , dirsonly bool ) map [ string ] interface { } {
2015-02-07 10:52:42 +00:00
m . fmut . RLock ( )
files , ok := m . folderFiles [ folder ]
m . fmut . RUnlock ( )
if ! ok {
return nil
}
output := make ( map [ string ] interface { } )
sep := string ( filepath . Separator )
prefix = osutil . NativeFilename ( prefix )
if prefix != "" && ! strings . HasSuffix ( prefix , sep ) {
prefix = prefix + sep
}
files . WithPrefixedGlobalTruncated ( prefix , func ( fi db . FileIntf ) bool {
f := fi . ( db . FileInfoTruncated )
2018-05-17 09:26:40 +02:00
// Don't include the prefix itself.
if f . IsInvalid ( ) || f . IsDeleted ( ) || strings . HasPrefix ( prefix , f . Name ) {
2015-02-07 10:52:42 +00:00
return true
}
f . Name = strings . Replace ( f . Name , prefix , "" , 1 )
var dir , base string
if f . IsDirectory ( ) && ! f . IsSymlink ( ) {
dir = f . Name
} else {
dir = filepath . Dir ( f . Name )
base = filepath . Base ( f . Name )
}
if levels > - 1 && strings . Count ( f . Name , sep ) > levels {
return true
}
last := output
if dir != "." {
for _ , path := range strings . Split ( dir , sep ) {
directory , ok := last [ path ]
if ! ok {
newdir := make ( map [ string ] interface { } )
last [ path ] = newdir
last = newdir
} else {
last = directory . ( map [ string ] interface { } )
}
}
}
if ! dirsonly && base != "" {
2015-04-20 22:37:04 +09:00
last [ base ] = [ ] interface { } {
2016-08-06 13:05:59 +00:00
f . ModTime ( ) , f . FileSize ( ) ,
2015-02-07 10:52:42 +00:00
}
}
return true
} )
return output
}
2019-02-26 09:09:25 +01:00
func ( m * model ) GetFolderVersions ( folder string ) ( map [ string ] [ ] versioner . FileVersion , error ) {
2018-01-01 14:39:23 +00:00
fcfg , ok := m . cfg . Folder ( folder )
if ! ok {
return nil , errFolderMissing
}
files := make ( map [ string ] [ ] versioner . FileVersion )
filesystem := fcfg . Filesystem ( )
err := filesystem . Walk ( ".stversions" , func ( path string , f fs . FileInfo , err error ) error {
// Skip root (which is ok to be a symlink)
if path == ".stversions" {
return nil
}
2019-02-02 12:09:07 +01:00
// Skip walking if we cannot walk...
if err != nil {
return err
}
2018-01-01 14:39:23 +00:00
// Ignore symlinks
if f . IsSymlink ( ) {
return fs . SkipDir
}
// No records for directories
if f . IsDir ( ) {
return nil
}
// Strip .stversions prefix.
path = strings . TrimPrefix ( path , ".stversions" + string ( fs . PathSeparator ) )
name , tag := versioner . UntagFilename ( path )
// Something invalid
if name == "" || tag == "" {
return nil
}
name = osutil . NormalizedFilename ( name )
versionTime , err := time . ParseInLocation ( versioner . TimeFormat , tag , locationLocal )
if err != nil {
return nil
}
files [ name ] = append ( files [ name ] , versioner . FileVersion {
VersionTime : versionTime . Truncate ( time . Second ) ,
ModTime : f . ModTime ( ) . Truncate ( time . Second ) ,
Size : f . Size ( ) ,
} )
return nil
} )
if err != nil {
return nil , err
}
return files , nil
}
2019-02-26 09:09:25 +01:00
func ( m * model ) RestoreFolderVersions ( folder string , versions map [ string ] time . Time ) ( map [ string ] string , error ) {
2018-01-01 14:39:23 +00:00
fcfg , ok := m . cfg . Folder ( folder )
if ! ok {
return nil , errFolderMissing
}
filesystem := fcfg . Filesystem ( )
ver := fcfg . Versioner ( )
restore := make ( map [ string ] string )
errors := make ( map [ string ] string )
// Validation
for file , version := range versions {
file = osutil . NativeFilename ( file )
tag := version . In ( locationLocal ) . Truncate ( time . Second ) . Format ( versioner . TimeFormat )
versionedTaggedFilename := filepath . Join ( ".stversions" , versioner . TagFilename ( file , tag ) )
// Check that the thing we've been asked to restore is actually a file
// and that it exists.
if info , err := filesystem . Lstat ( versionedTaggedFilename ) ; err != nil {
errors [ file ] = err . Error ( )
continue
} else if ! info . IsRegular ( ) {
errors [ file ] = "not a file"
continue
}
// Check that the target location of where we are supposed to restore
// either does not exist, or is actually a file.
if info , err := filesystem . Lstat ( file ) ; err == nil && ! info . IsRegular ( ) {
errors [ file ] = "cannot replace a non-file"
continue
} else if err != nil && ! fs . IsNotExist ( err ) {
errors [ file ] = err . Error ( )
continue
}
restore [ file ] = versionedTaggedFilename
}
// Execution
var err error
for target , source := range restore {
err = nil
if _ , serr := filesystem . Lstat ( target ) ; serr == nil {
if ver != nil {
err = osutil . InWritableDir ( ver . Archive , filesystem , target )
} else {
err = osutil . InWritableDir ( filesystem . Remove , filesystem , target )
}
}
2019-02-02 12:16:27 +01:00
filesystem . MkdirAll ( filepath . Dir ( target ) , 0755 )
2018-01-01 14:39:23 +00:00
if err == nil {
err = osutil . Copy ( filesystem , source , target )
}
if err != nil {
errors [ target ] = err . Error ( )
continue
}
}
// Trigger scan
if ! fcfg . FSWatcherEnabled {
m . ScanFolder ( folder )
}
return errors , nil
}
2019-02-26 09:09:25 +01:00
// Availability returns the set of connected devices the given block of the
// given file can be fetched from: devices that announce the file in their
// index (FromTemporary: false) plus devices whose download-progress
// updates show they hold this block in a temporary file (FromTemporary:
// true).
func (m *model) Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
	// The slightly unusual locking sequence here is because we need to hold
	// pmut for the duration (as the value returned from foldersFiles can
	// get heavily modified on Close()), but also must acquire fmut before
	// pmut. (The locks can be *released* in any order.)
	m.fmut.RLock()
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	fs, ok := m.folderFiles[folder]
	cfg := m.folderCfgs[folder]
	m.fmut.RUnlock()

	if !ok {
		// Unknown folder; can be called with user input from the REST API.
		return nil
	}

	var availabilities []Availability
next:
	for _, device := range fs.Availability(file.Name) {
		// Skip devices that have this folder paused on their side.
		for _, pausedFolder := range m.remotePausedFolders[device] {
			if pausedFolder == folder {
				continue next
			}
		}
		// Only currently connected devices count.
		_, ok := m.conn[device]
		if ok {
			availabilities = append(availabilities, Availability{ID: device, FromTemporary: false})
		}
	}

	// Devices sharing the folder that are known (from download progress
	// updates) to already have this block in a temporary file.
	for _, device := range cfg.Devices {
		if m.deviceDownloads[device.DeviceID].Has(folder, file.Name, file.Version, int32(block.Offset/int64(file.BlockSize()))) {
			availabilities = append(availabilities, Availability{ID: device.DeviceID, FromTemporary: true})
		}
	}

	return availabilities
}
2015-04-28 22:32:10 +02:00
// BringToFront bumps the given files priority in the job queue.
2019-02-26 09:09:25 +01:00
func ( m * model ) BringToFront ( folder , file string ) {
2014-12-01 19:23:06 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
runner , ok := m . folderRunners [ folder ]
if ok {
2014-12-30 09:35:21 +01:00
runner . BringToFront ( file )
2014-12-01 19:23:06 +00:00
}
}
2019-02-26 09:09:25 +01:00
// ResetFolder drops all database state for the given folder, so that it
// will be repopulated from scratch.
func (m *model) ResetFolder(folder string) {
	l.Infof("Cleaning data for folder %q", folder)
	db.DropFolder(m.db, folder)
}
2019-02-26 09:09:25 +01:00
// String returns a printable representation of the model, identifying the
// instance by pointer for use in debug logging.
func (m *model) String() string {
	return fmt.Sprintf("model@%p", m)
}
2014-10-13 14:43:01 +02:00
2019-02-26 09:09:25 +01:00
// VerifyConfiguration is called before a configuration change is
// committed. The model performs no up-front validation of its own, so any
// change is accepted here.
func (m *model) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}
2019-02-26 09:09:25 +01:00
// CommitConfiguration applies a configuration change, given the old and
// the new configuration. It returns true when the change was handled in
// place and false when a restart is required.
func (m *model) CommitConfiguration(from, to config.Configuration) bool {
	// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.

	// Go through the folder configs and figure out if we need to restart or not.
	fromFolders := mapFolders(from.Folders)
	toFolders := mapFolders(to.Folders)
	for folderID, cfg := range toFolders {
		if _, ok := fromFolders[folderID]; !ok {
			// A folder was added.
			if cfg.Paused {
				l.Infoln("Paused folder", cfg.Description())
			} else {
				l.Infoln("Adding folder", cfg.Description())
				m.AddFolder(cfg)
				m.StartFolder(folderID)
			}
		}
	}

	for folderID, fromCfg := range fromFolders {
		toCfg, ok := toFolders[folderID]
		if !ok {
			// The folder was removed.
			m.RemoveFolder(fromCfg)
			continue
		}

		// Paused both before and after: nothing to do for this folder.
		if fromCfg.Paused && toCfg.Paused {
			continue
		}

		// This folder exists on both sides. Settings might have changed.
		// Check if anything differs that requires a restart.
		if !reflect.DeepEqual(fromCfg.RequiresRestartOnly(), toCfg.RequiresRestartOnly()) {
			m.RestartFolder(fromCfg, toCfg)
		}

		// Emit the folder pause/resume event
		if fromCfg.Paused != toCfg.Paused {
			eventType := events.FolderResumed
			if toCfg.Paused {
				eventType = events.FolderPaused
			}
			events.Default.Log(eventType, map[string]string{"id": toCfg.ID, "label": toCfg.Label})
		}
	}

	// Removing a device. We actually don't need to do anything.
	// Because folder config has changed (since the device lists do not match)
	// Folders for that had device got "restarted", which involves killing
	// connections to all devices that we were sharing the folder with.
	// At some point model.Close() will get called for that device which will
	// clean residue device state that is not part of any folder.

	// Pausing a device, unpausing is handled by the connection service.
	fromDevices := from.DeviceMap()
	toDevices := to.DeviceMap()
	for deviceID, toCfg := range toDevices {
		fromCfg, ok := fromDevices[deviceID]
		if !ok || fromCfg.Paused == toCfg.Paused {
			// New device, or pause state unchanged: nothing to do here.
			continue
		}

		// Ignored folder was removed, reconnect to retrigger the prompt.
		if len(fromCfg.IgnoredFolders) > len(toCfg.IgnoredFolders) {
			m.close(deviceID, errIgnoredFolderRemoved)
		}

		if toCfg.Paused {
			l.Infoln("Pausing", deviceID)
			m.close(deviceID, errDevicePaused)
			events.Default.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
		} else {
			events.Default.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
		}
	}

	// The scan limiter capacity can be adjusted on the fly.
	scanLimiter.setCapacity(to.Options.MaxConcurrentScans)

	// Some options don't require restart as those components handle it fine
	// by themselves. Compare the options structs containing only the
	// attributes that require restart and act appropriately.
	if !reflect.DeepEqual(from.Options.RequiresRestartOnly(), to.Options.RequiresRestartOnly()) {
		l.Debugln(m, "requires restart, options differ")
		return false
	}

	return true
}
2017-12-15 20:01:56 +00:00
// checkFolderRunningLocked returns nil if the folder is up and running and a
// descriptive error if not.
// Need to hold (read) lock on m.fmut when calling this.
2019-02-26 09:09:25 +01:00
func ( m * model ) checkFolderRunningLocked ( folder string ) error {
2017-12-15 20:01:56 +00:00
_ , ok := m . folderRunners [ folder ]
if ok {
return nil
}
if cfg , ok := m . cfg . Folder ( folder ) ; ! ok {
return errFolderMissing
} else if cfg . Paused {
2018-01-14 17:01:06 +00:00
return ErrFolderPaused
2017-12-15 20:01:56 +00:00
}
return errFolderNotRunning
}
// checkFolderDeviceStatusLocked first checks the folder and then whether the
// given device is connected and shares this folder.
// Need to hold (read) lock on both m.fmut and m.pmut when calling this.
2019-02-26 09:09:25 +01:00
func ( m * model ) checkDeviceFolderConnectedLocked ( device protocol . DeviceID , folder string ) error {
2017-12-15 20:01:56 +00:00
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
return err
}
if cfg , ok := m . cfg . Device ( device ) ; ! ok {
return errDeviceUnknown
} else if cfg . Paused {
return errDevicePaused
}
if _ , ok := m . conn [ device ] ; ! ok {
return errors . New ( "device is not connected" )
}
2018-06-06 23:34:11 +02:00
if cfg , ok := m . cfg . Folder ( folder ) ; ! ok || ! cfg . SharedWith ( device ) {
2017-12-15 20:01:56 +00:00
return errors . New ( "folder is not shared with device" )
}
return nil
}
2015-07-22 09:02:55 +02:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
	byID := make(map[string]config.FolderConfiguration, len(folders))
	for _, folderCfg := range folders {
		byID[folderCfg.ID] = folderCfg
	}
	return byID
}
// mapDevices returns a set (map to empty struct) of the given device IDs.
func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for _, device := range devices {
		set[device] = struct{}{}
	}
	return set
}
2015-04-25 22:53:44 +01:00
// Skips `skip` elements and retrieves up to `get` elements from a given slice.
// Returns the resulting slice, plus how many elements are left to skip or
// copy to satisfy the values which were provided, given the slice is not
// big enough.
func getChunk(data []string, skip, get int) ([]string, int, int) {
	n := len(data)
	switch {
	case n <= skip:
		// Everything is skipped; nothing to return.
		return []string{}, skip - n, get
	case n < skip+get:
		// Partially satisfied; return what is available past the skip.
		return data[skip:], 0, get - (n - skip)
	default:
		return data[skip : skip+get], 0, 0
	}
}
2015-07-22 09:02:55 +02:00
2017-08-19 14:36:56 +00:00
func readOffsetIntoBuf ( fs fs . Filesystem , file string , offset int64 , buf [ ] byte ) error {
fd , err := fs . Open ( file )
2016-04-15 10:59:41 +00:00
if err != nil {
l . Debugln ( "readOffsetIntoBuf.Open" , file , err )
return err
}
defer fd . Close ( )
_ , err = fd . ReadAt ( buf , offset )
if err != nil {
l . Debugln ( "readOffsetIntoBuf.ReadAt" , file , err )
}
return err
}
2016-05-01 06:49:29 +00:00
// makeForgetUpdate takes an index update and constructs a download progress
// update causing to forget any progress for files which we've just been sent.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	out := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for _, f := range files {
		// Only regular, existing files carry download progress.
		if f.IsSymlink() || f.IsDirectory() || f.IsDeleted() {
			continue
		}
		out = append(out, protocol.FileDownloadProgressUpdate{
			Name:       f.Name,
			Version:    f.Version,
			UpdateType: protocol.UpdateTypeForget,
		})
	}
	return out
}
2016-08-05 07:13:52 +00:00
2016-11-07 16:40:48 +00:00
// folderDeviceSet is a set of (folder, deviceID) pairs.
type folderDeviceSet map[string]map[protocol.DeviceID]struct{}

// set adds the (dev, folder) pair to the set.
func (s folderDeviceSet) set(dev protocol.DeviceID, folder string) {
	devs := s[folder]
	if devs == nil {
		devs = make(map[protocol.DeviceID]struct{})
		s[folder] = devs
	}
	devs[dev] = struct{}{}
}

// has returns true if the (dev, folder) pair is in the set.
func (s folderDeviceSet) has(dev protocol.DeviceID, folder string) bool {
	_, ok := s[folder][dev]
	return ok
}

// hasDevice returns true if the device is set on any folder.
func (s folderDeviceSet) hasDevice(dev protocol.DeviceID) bool {
	for _, devs := range s {
		if _, ok := devs[dev]; ok {
			return true
		}
	}
	return false
}
2018-08-25 10:32:35 +02:00
type fileInfoBatch struct {
infos [ ] protocol . FileInfo
size int
flushFn func ( [ ] protocol . FileInfo ) error
}
func newFileInfoBatch ( fn func ( [ ] protocol . FileInfo ) error ) * fileInfoBatch {
return & fileInfoBatch {
infos : make ( [ ] protocol . FileInfo , 0 , maxBatchSizeFiles ) ,
flushFn : fn ,
}
}
func ( b * fileInfoBatch ) append ( f protocol . FileInfo ) {
b . infos = append ( b . infos , f )
b . size += f . ProtoSize ( )
}
func ( b * fileInfoBatch ) flushIfFull ( ) error {
2019-03-05 21:34:04 +01:00
if len ( b . infos ) >= maxBatchSizeFiles || b . size >= maxBatchSizeBytes {
2018-08-25 10:32:35 +02:00
return b . flush ( )
}
return nil
}
func ( b * fileInfoBatch ) flush ( ) error {
if len ( b . infos ) == 0 {
return nil
}
if err := b . flushFn ( b . infos ) ; err != nil {
return err
}
b . reset ( )
return nil
}
func ( b * fileInfoBatch ) reset ( ) {
b . infos = b . infos [ : 0 ]
b . size = 0
}
2018-10-05 10:26:25 +02:00
// syncMutexMap is a type safe wrapper for a sync.Map that holds mutexes.
type syncMutexMap struct {
	inner stdsync.Map
}

// Get returns the mutex for the given key, creating it on first use.
func (m *syncMutexMap) Get(key string) sync.Mutex {
	mut, _ := m.inner.LoadOrStore(key, sync.NewMutex())
	return mut.(sync.Mutex)
}
2019-01-05 18:10:02 +01:00
// sanitizePathRe matches spans of characters that sanitizePath considers
// invalid. Compiled once at package scope rather than on every call.
var sanitizePathRe = regexp.MustCompile(`([[:cntrl:]]|[<>:"'/\\|?*\n\r\t \[\]\{\};:!@$%&^#])+`)

// sanitizePath takes a string that might contain all kinds of special
// characters and makes a valid, similar, path name out of it.
//
// Spans of invalid characters are replaced by a single space. Invalid
// characters are control characters, the things not allowed in file names
// in Windows, and common shell metacharacters. Even if asterisks and pipes
// and stuff are allowed on Unixes in general they might not be allowed by
// the filesystem and may surprise the user and cause shell oddness. This
// function is intended for file names we generate on behalf of the user,
// and surprising them with odd shell characters in file names is unkind.
//
// We include whitespace in the invalid characters so that multiple
// whitespace is collapsed to a single space. Additionally, whitespace at
// either end is removed.
func sanitizePath(path string) string {
	return strings.TrimSpace(sanitizePathRe.ReplaceAllString(path, " "))
}