2014-11-16 21:13:20 +01:00
// Copyright (C) 2014 The Syncthing Authors.
2014-09-29 21:43:32 +02:00
//
2015-03-07 21:36:35 +01:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
2017-02-09 07:52:18 +01:00
// You can obtain one at https://mozilla.org/MPL/2.0/.
2014-06-01 22:50:14 +02:00
2014-05-15 00:26:55 -03:00
package model
2013-12-15 11:43:31 +01:00
import (
2017-04-26 00:15:23 +00:00
"context"
2014-09-10 08:48:15 +02:00
"crypto/tls"
2015-03-10 23:45:43 +01:00
"encoding/json"
2014-01-06 21:31:36 +01:00
"errors"
2013-12-23 12:12:44 -05:00
"fmt"
2013-12-31 21:22:49 -05:00
"io"
2014-01-05 23:54:57 +01:00
"net"
2014-03-28 14:36:57 +01:00
"path/filepath"
2015-06-03 09:47:39 +02:00
"reflect"
2015-04-29 20:46:32 +02:00
"runtime"
2016-03-18 08:28:44 +00:00
"sort"
2014-08-11 20:20:01 +02:00
"strings"
2013-12-15 11:43:31 +01:00
"time"
2014-06-21 09:43:12 +02:00
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/config"
2016-05-04 19:38:12 +00:00
"github.com/syncthing/syncthing/lib/connections"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
2016-08-05 17:45:45 +00:00
"github.com/syncthing/syncthing/lib/fs"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
2015-09-22 19:38:46 +02:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/sync"
2016-12-17 19:48:33 +00:00
"github.com/syncthing/syncthing/lib/upgrade"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/versioner"
2017-02-06 10:27:11 +00:00
"github.com/syncthing/syncthing/lib/weakhash"
2015-06-12 13:04:00 +02:00
"github.com/thejerf/suture"
2013-12-15 11:43:31 +01:00
)
2018-01-01 14:39:23 +00:00
var locationLocal * time . Location
func init ( ) {
var err error
locationLocal , err = time . LoadLocation ( "Local" )
if err != nil {
panic ( err . Error ( ) )
}
}
2014-07-15 13:04:37 +02:00
// How many files to send in each Index/IndexUpdate message.
const (
	maxBatchSizeBytes = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	maxBatchSizeFiles = 1000       // Either way, don't include more files than this
)
2014-07-15 13:04:37 +02:00
2014-09-30 17:52:05 +02:00
// service is the interface implemented by the per-folder runners. The model
// creates one service per folder, adds it to its supervisor, and addresses
// the folder exclusively through these methods.
type service interface {
	BringToFront(string)
	DelayScan(d time.Duration)
	IgnoresUpdated()            // ignore matcher was updated notification
	SchedulePull()              // something relevant changed, we should try a pull
	Jobs() ([]string, []string) // In progress, Queued
	Scan(subs []string) error
	Serve()
	Stop()
	CheckHealth() error
	PullErrors() []FileError
	WatchError() error

	// Unexported state accessors used internally by the model.
	getState() (folderState, time.Time, error)
	setState(state folderState)
	setError(err error)
}
2016-04-15 10:59:41 +00:00
// Availability identifies a device from which data is available.
// FromTemporary presumably indicates availability via the device's temporary
// (in-transfer) copy rather than its final file — confirm against callers.
type Availability struct {
	ID            protocol.DeviceID `json:"id"`
	FromTemporary bool              `json:"fromTemporary"`
}
2013-12-15 11:43:31 +01:00
// Model ties together the database, the per-folder runners and the device
// connections. Folder-related state is guarded by fmut and connection-related
// state by pmut, as annotated below.
type Model struct {
	*suture.Supervisor

	cfg               *config.Wrapper
	db                *db.Instance
	finder            *db.BlockFinder
	progressEmitter   *ProgressEmitter
	id                protocol.DeviceID
	shortID           protocol.ShortID
	cacheIgnoredFiles bool
	protectedFiles    []string

	clientName    string
	clientVersion string

	folderCfgs         map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles        map[string]*db.FileSet                                 // folder -> files
	folderDevices      folderDeviceSet                                        // folder -> deviceIDs
	deviceFolders      map[protocol.DeviceID][]string                         // deviceID -> folders
	deviceStatRefs     map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores      map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners      map[string]service                                     // folder -> puller or scanner
	folderRunnerTokens map[string][]suture.ServiceToken                       // folder -> tokens for puller or scanner
	folderStatRefs     map[string]*stats.FolderStatisticsReference            // folder -> statsRef
	fmut               sync.RWMutex                                           // protects the above

	conn                map[protocol.DeviceID]connections.Connection
	closed              map[protocol.DeviceID]chan struct{}
	helloMessages       map[protocol.DeviceID]protocol.HelloResult
	deviceDownloads     map[protocol.DeviceID]*deviceDownloadState
	remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders
	pmut                sync.RWMutex                   // protects the above
}
2017-08-19 14:36:56 +00:00
type folderFactory func ( * Model , config . FolderConfiguration , versioner . Versioner , fs . Filesystem ) service
2016-05-04 10:47:33 +00:00
2014-01-07 22:44:21 +01:00
var (
2016-05-04 11:26:36 +00:00
folderFactories = make ( map [ config . FolderType ] folderFactory , 0 )
2014-01-07 22:44:21 +01:00
)
2014-01-06 21:31:36 +01:00
2016-06-26 10:07:27 +00:00
// Errors returned by model operations. Only ErrFolderPaused is exported for
// callers outside the package; the rest are compared internally.
var (
	errDeviceUnknown = errors.New("unknown device")
	errDevicePaused  = errors.New("device is paused")
	errDeviceIgnored = errors.New("device is ignored")

	// ErrFolderPaused is returned when an operation is attempted on a
	// folder that is currently paused.
	ErrFolderPaused = errors.New("folder is paused")

	errFolderNotRunning  = errors.New("folder is not running")
	errFolderMissing     = errors.New("no such folder")
	errNetworkNotAllowed = errors.New("network not allowed")
)
2014-01-06 11:11:18 +01:00
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
2014-09-28 12:00:38 +01:00
// for file data without altering the local folder in any way.
2017-05-22 19:58:33 +00:00
func NewModel ( cfg * config . Wrapper , id protocol . DeviceID , clientName , clientVersion string , ldb * db . Instance , protectedFiles [ ] string ) * Model {
2013-12-15 11:43:31 +01:00
m := & Model {
2015-07-11 11:12:20 +10:00
Supervisor : suture . New ( "model" , suture . Spec {
Log : func ( line string ) {
2015-10-03 17:25:21 +02:00
l . Debugln ( line )
2015-07-11 11:12:20 +10:00
} ,
} ) ,
2016-12-21 18:41:25 +00:00
cfg : cfg ,
db : ldb ,
finder : db . NewBlockFinder ( ldb ) ,
progressEmitter : NewProgressEmitter ( cfg ) ,
id : id ,
shortID : id . Short ( ) ,
cacheIgnoredFiles : cfg . Options ( ) . CacheIgnoredFiles ,
protectedFiles : protectedFiles ,
clientName : clientName ,
clientVersion : clientVersion ,
folderCfgs : make ( map [ string ] config . FolderConfiguration ) ,
folderFiles : make ( map [ string ] * db . FileSet ) ,
folderDevices : make ( folderDeviceSet ) ,
deviceFolders : make ( map [ protocol . DeviceID ] [ ] string ) ,
deviceStatRefs : make ( map [ protocol . DeviceID ] * stats . DeviceStatisticsReference ) ,
folderIgnores : make ( map [ string ] * ignore . Matcher ) ,
folderRunners : make ( map [ string ] service ) ,
folderRunnerTokens : make ( map [ string ] [ ] suture . ServiceToken ) ,
folderStatRefs : make ( map [ string ] * stats . FolderStatisticsReference ) ,
conn : make ( map [ protocol . DeviceID ] connections . Connection ) ,
closed : make ( map [ protocol . DeviceID ] chan struct { } ) ,
helloMessages : make ( map [ protocol . DeviceID ] protocol . HelloResult ) ,
deviceDownloads : make ( map [ protocol . DeviceID ] * deviceDownloadState ) ,
remotePausedFolders : make ( map [ protocol . DeviceID ] [ ] string ) ,
fmut : sync . NewRWMutex ( ) ,
pmut : sync . NewRWMutex ( ) ,
2013-12-15 11:43:31 +01:00
}
2014-11-25 22:07:18 +00:00
if cfg . Options ( ) . ProgressUpdateIntervalS > - 1 {
go m . progressEmitter . Serve ( )
}
2016-08-07 16:21:59 +00:00
cfg . Subscribe ( m )
2013-12-15 11:43:31 +01:00
return m
}
2015-04-28 22:32:10 +02:00
// StartDeadlockDetector starts a deadlock detector on the models locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
2015-04-08 13:35:03 +01:00
func ( m * Model ) StartDeadlockDetector ( timeout time . Duration ) {
l . Infof ( "Starting deadlock detector with %v timeout" , timeout )
2016-10-30 00:14:38 +01:00
detector := newDeadlockDetector ( timeout )
detector . Watch ( "fmut" , m . fmut )
detector . Watch ( "pmut" , m . pmut )
2015-04-08 13:35:03 +01:00
}
2016-06-26 10:07:27 +00:00
// StartFolder constructs the folder service and starts it.
2016-05-04 10:47:33 +00:00
func ( m * Model ) StartFolder ( folder string ) {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
2016-12-21 18:41:25 +00:00
m . pmut . Lock ( )
2017-08-08 13:13:08 +00:00
folderType := m . startFolderLocked ( folder )
2017-08-08 15:23:29 +02:00
folderCfg := m . folderCfgs [ folder ]
2016-12-21 18:41:25 +00:00
m . pmut . Unlock ( )
2016-08-07 16:21:59 +00:00
m . fmut . Unlock ( )
2016-12-19 10:12:06 +01:00
l . Infof ( "Ready to synchronize %s (%s)" , folderCfg . Description ( ) , folderType )
2016-08-07 16:21:59 +00:00
}
// startFolderLocked starts the runner service for an already-added folder and
// returns the folder's type. Both fmut and pmut must be held for writing.
// Panics if the folder does not exist or is already running — both indicate
// programmer error in the caller.
func (m *Model) startFolderLocked(folder string) config.FolderType {
	if err := m.checkFolderRunningLocked(folder); err == errFolderMissing {
		panic("cannot start nonexistent folder " + folder)
	} else if err == nil {
		panic("cannot start already running folder " + folder)
	}

	cfg := m.folderCfgs[folder]

	folderFactory, ok := folderFactories[cfg.Type]
	if !ok {
		panic(fmt.Sprintf("unknown folder type 0x%x", cfg.Type))
	}

	fs := m.folderFiles[folder]

	// Find any devices for which we hold the index in the db, but the folder
	// is not shared, and drop it.
	expected := mapDevices(cfg.DeviceIDs())
	for _, available := range fs.ListDevices() {
		if _, ok := expected[available]; !ok {
			l.Debugln("dropping", folder, "state for", available)
			fs.Drop(available)
		}
	}

	// Close connections to affected devices
	for _, id := range cfg.DeviceIDs() {
		m.closeLocked(id)
	}

	v, ok := fs.Sequence(protocol.LocalDeviceID), true
	indexHasFiles := ok && v > 0
	if !indexHasFiles {
		// It's a blank folder, so this may the first time we're looking at
		// it. Attempt to create and tag with our marker as appropriate. We
		// don't really do anything with errors at this point except warn -
		// if these things don't work, we still want to start the folder and
		// it'll show up as errored later.

		// Directory permission bits. Will be filtered down to something
		// sane by umask on Unixes.
		cfg.CreateRoot()

		if err := cfg.CreateMarker(); err != nil {
			l.Warnln("Creating folder marker:", err)
		}
	}

	ver := cfg.Versioner()
	if service, ok := ver.(suture.Service); ok {
		// The versioner implements the suture.Service interface, so
		// expects to be run in the background in addition to being called
		// when files are going to be archived.
		token := m.Add(service)
		m.folderRunnerTokens[folder] = append(m.folderRunnerTokens[folder], token)
	}

	ffs := fs.MtimeFS()

	// These are our metadata files, and they should always be hidden.
	ffs.Hide(config.DefaultMarkerName)
	ffs.Hide(".stversions")
	ffs.Hide(".stignore")

	p := folderFactory(m, cfg, ver, ffs)

	m.folderRunners[folder] = p

	m.warnAboutOverwritingProtectedFiles(folder)

	// Register the runner with the supervisor; the token lets us stop it
	// again in tearDownFolderLocked.
	token := m.Add(p)
	m.folderRunnerTokens[folder] = append(m.folderRunnerTokens[folder], token)

	return cfg.Type
}
2014-01-06 11:11:18 +01:00
2015-10-18 20:13:58 -04:00
func ( m * Model ) warnAboutOverwritingProtectedFiles ( folder string ) {
2016-12-16 22:23:35 +00:00
if m . folderCfgs [ folder ] . Type == config . FolderTypeSendOnly {
2015-10-18 20:13:58 -04:00
return
}
2017-08-19 14:36:56 +00:00
// This is a bit of a hack.
ffs := m . folderCfgs [ folder ] . Filesystem ( )
if ffs . Type ( ) != fs . FilesystemTypeBasic {
return
}
folderLocation := ffs . URI ( )
2015-10-18 20:13:58 -04:00
ignores := m . folderIgnores [ folder ]
var filesAtRisk [ ] string
for _ , protectedFilePath := range m . protectedFiles {
// check if file is synced in this folder
if ! strings . HasPrefix ( protectedFilePath , folderLocation ) {
continue
}
// check if file is ignored
2017-03-04 07:49:48 +00:00
relPath , _ := filepath . Rel ( folderLocation , protectedFilePath )
if ignores . Match ( relPath ) . IsIgnored ( ) {
2015-10-18 20:13:58 -04:00
continue
}
filesAtRisk = append ( filesAtRisk , protectedFilePath )
}
if len ( filesAtRisk ) > 0 {
2016-10-27 17:02:19 +00:00
l . Warnln ( "Some protected files may be overwritten and cause issues. See https://docs.syncthing.net/users/config.html#syncing-configuration-files for more information. The at risk files are:" , strings . Join ( filesAtRisk , ", " ) )
2015-10-18 20:13:58 -04:00
}
}
2016-08-07 16:21:59 +00:00
func ( m * Model ) AddFolder ( cfg config . FolderConfiguration ) {
if len ( cfg . ID ) == 0 {
panic ( "cannot add empty folder id" )
}
2017-08-19 14:36:56 +00:00
if len ( cfg . Path ) == 0 {
panic ( "cannot add empty folder path" )
}
2016-08-07 16:21:59 +00:00
m . fmut . Lock ( )
m . addFolderLocked ( cfg )
m . fmut . Unlock ( )
}
func ( m * Model ) addFolderLocked ( cfg config . FolderConfiguration ) {
m . folderCfgs [ cfg . ID ] = cfg
2017-08-19 14:36:56 +00:00
folderFs := cfg . Filesystem ( )
m . folderFiles [ cfg . ID ] = db . NewFileSet ( cfg . ID , folderFs , m . db )
2016-08-07 16:21:59 +00:00
2016-11-07 16:40:48 +00:00
for _ , device := range cfg . Devices {
m . folderDevices . set ( device . DeviceID , cfg . ID )
2016-08-07 16:21:59 +00:00
m . deviceFolders [ device . DeviceID ] = append ( m . deviceFolders [ device . DeviceID ] , cfg . ID )
}
2017-08-19 14:36:56 +00:00
ignores := ignore . New ( folderFs , ignore . WithCache ( m . cacheIgnoredFiles ) )
if err := ignores . Load ( ".stignore" ) ; err != nil && ! fs . IsNotExist ( err ) {
2016-08-07 16:21:59 +00:00
l . Warnln ( "Loading ignores:" , err )
}
m . folderIgnores [ cfg . ID ] = ignores
}
2017-10-03 23:53:02 +01:00
func ( m * Model ) RemoveFolder ( cfg config . FolderConfiguration ) {
2015-11-13 13:30:52 +01:00
m . fmut . Lock ( )
m . pmut . Lock ( )
2017-01-07 17:05:30 +00:00
// Delete syncthing specific files
2017-11-05 12:18:05 +00:00
cfg . Filesystem ( ) . RemoveAll ( config . DefaultMarkerName )
2017-01-07 17:05:30 +00:00
2017-10-03 23:53:02 +01:00
m . tearDownFolderLocked ( cfg . ID )
2016-08-07 16:21:59 +00:00
// Remove it from the database
2017-10-03 23:53:02 +01:00
db . DropFolder ( m . db , cfg . ID )
2016-08-07 16:21:59 +00:00
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
func ( m * Model ) tearDownFolderLocked ( folder string ) {
2015-11-13 13:30:52 +01:00
// Stop the services running for this folder
for _ , id := range m . folderRunnerTokens [ folder ] {
m . Remove ( id )
}
// Close connections to affected devices
2016-11-07 16:40:48 +00:00
for dev := range m . folderDevices [ folder ] {
2015-11-13 13:30:52 +01:00
if conn , ok := m . conn [ dev ] ; ok {
closeRawConn ( conn )
}
}
// Clean up our config maps
delete ( m . folderCfgs , folder )
delete ( m . folderFiles , folder )
delete ( m . folderDevices , folder )
delete ( m . folderIgnores , folder )
delete ( m . folderRunners , folder )
delete ( m . folderRunnerTokens , folder )
delete ( m . folderStatRefs , folder )
for dev , folders := range m . deviceFolders {
m . deviceFolders [ dev ] = stringSliceWithout ( folders , folder )
}
2016-08-07 16:21:59 +00:00
}
2015-11-13 13:30:52 +01:00
2016-08-07 16:21:59 +00:00
func ( m * Model ) RestartFolder ( cfg config . FolderConfiguration ) {
if len ( cfg . ID ) == 0 {
panic ( "cannot add empty folder id" )
}
m . fmut . Lock ( )
m . pmut . Lock ( )
m . tearDownFolderLocked ( cfg . ID )
2017-12-29 13:14:39 +00:00
if cfg . Paused {
l . Infoln ( "Paused folder" , cfg . Description ( ) )
} else {
2016-12-21 18:41:25 +00:00
m . addFolderLocked ( cfg )
folderType := m . startFolderLocked ( cfg . ID )
l . Infoln ( "Restarted folder" , cfg . Description ( ) , fmt . Sprintf ( "(%s)" , folderType ) )
}
2015-11-13 13:30:52 +01:00
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
2017-11-09 21:16:29 +00:00
func ( m * Model ) UsageReportingStats ( version int , preview bool ) map [ string ] interface { } {
2017-10-12 06:16:46 +00:00
stats := make ( map [ string ] interface { } )
if version >= 3 {
// Block stats
2017-11-09 21:16:29 +00:00
blockStatsMut . Lock ( )
copyBlockStats := make ( map [ string ] int )
for k , v := range blockStats {
copyBlockStats [ k ] = v
if ! preview {
blockStats [ k ] = 0
2017-10-12 06:16:46 +00:00
}
}
2017-11-09 21:16:29 +00:00
blockStatsMut . Unlock ( )
stats [ "blockStats" ] = copyBlockStats
2017-10-12 06:16:46 +00:00
// Transport stats
m . pmut . Lock ( )
transportStats := make ( map [ string ] int )
for _ , conn := range m . conn {
transportStats [ conn . Transport ( ) ] ++
}
m . pmut . Unlock ( )
stats [ "transportStats" ] = transportStats
// Ignore stats
ignoreStats := map [ string ] int {
"lines" : 0 ,
"inverts" : 0 ,
"folded" : 0 ,
"deletable" : 0 ,
"rooted" : 0 ,
"includes" : 0 ,
"escapedIncludes" : 0 ,
"doubleStars" : 0 ,
"stars" : 0 ,
}
var seenPrefix [ 3 ] bool
for folder := range m . cfg . Folders ( ) {
lines , _ , err := m . GetIgnores ( folder )
if err != nil {
continue
}
ignoreStats [ "lines" ] += len ( lines )
for _ , line := range lines {
// Allow prefixes to be specified in any order, but only once.
for {
if strings . HasPrefix ( line , "!" ) && ! seenPrefix [ 0 ] {
seenPrefix [ 0 ] = true
line = line [ 1 : ]
ignoreStats [ "inverts" ] += 1
} else if strings . HasPrefix ( line , "(?i)" ) && ! seenPrefix [ 1 ] {
seenPrefix [ 1 ] = true
line = line [ 4 : ]
ignoreStats [ "folded" ] += 1
} else if strings . HasPrefix ( line , "(?d)" ) && ! seenPrefix [ 2 ] {
seenPrefix [ 2 ] = true
line = line [ 4 : ]
ignoreStats [ "deletable" ] += 1
} else {
seenPrefix [ 0 ] = false
seenPrefix [ 1 ] = false
seenPrefix [ 2 ] = false
break
}
}
// Noops, remove
if strings . HasSuffix ( line , "**" ) {
line = line [ : len ( line ) - 2 ]
}
if strings . HasPrefix ( line , "**/" ) {
line = line [ 3 : ]
}
if strings . HasPrefix ( line , "/" ) {
ignoreStats [ "rooted" ] += 1
} else if strings . HasPrefix ( line , "#include " ) {
ignoreStats [ "includes" ] += 1
if strings . Contains ( line , ".." ) {
ignoreStats [ "escapedIncludes" ] += 1
}
}
if strings . Contains ( line , "**" ) {
ignoreStats [ "doubleStars" ] += 1
// Remove not to trip up star checks.
strings . Replace ( line , "**" , "" , - 1 )
}
if strings . Contains ( line , "*" ) {
ignoreStats [ "stars" ] += 1
}
}
}
stats [ "ignoreStats" ] = ignoreStats
}
return stats
}
2014-01-05 23:54:57 +01:00
// ConnectionInfo describes one device connection as reported by
// ConnectionStats: traffic statistics plus connection state and metadata.
type ConnectionInfo struct {
	protocol.Statistics
	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          string
}
2015-03-10 23:45:43 +01:00
func ( info ConnectionInfo ) MarshalJSON ( ) ( [ ] byte , error ) {
return json . Marshal ( map [ string ] interface { } {
"at" : info . At ,
"inBytesTotal" : info . InBytesTotal ,
"outBytesTotal" : info . OutBytesTotal ,
2015-08-23 21:56:10 +02:00
"connected" : info . Connected ,
"paused" : info . Paused ,
2015-03-10 23:45:43 +01:00
"address" : info . Address ,
"clientVersion" : info . ClientVersion ,
2016-05-04 19:38:12 +00:00
"type" : info . Type ,
2015-03-10 23:45:43 +01:00
} )
}
2015-11-09 23:48:58 +01:00
// ConnectionStats returns a map with connection statistics for each device.
func (m *Model) ConnectionStats() map[string]interface{} {
	// Lock order: fmut before pmut, released in reverse.
	m.fmut.RLock()
	m.pmut.RLock()

	res := make(map[string]interface{})
	devs := m.cfg.Devices()
	conns := make(map[string]ConnectionInfo, len(devs))
	for device, deviceCfg := range devs {
		// Show "name version" for third-party clients, just the version
		// for syncthing itself.
		hello := m.helloMessages[device]
		versionString := hello.ClientVersion
		if hello.ClientName != "syncthing" {
			versionString = hello.ClientName + " " + hello.ClientVersion
		}
		ci := ConnectionInfo{
			ClientVersion: strings.TrimSpace(versionString),
			Paused:        deviceCfg.Paused,
		}
		// Connection details only exist for currently connected devices.
		if conn, ok := m.conn[device]; ok {
			ci.Type = conn.Type()
			ci.Connected = ok
			ci.Statistics = conn.Statistics()
			if addr := conn.RemoteAddr(); addr != nil {
				ci.Address = addr.String()
			}
		}

		conns[device.String()] = ci
	}

	res["connections"] = conns

	m.pmut.RUnlock()
	m.fmut.RUnlock()

	// Totals are process-wide and need no model locks.
	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}
2015-04-28 22:32:10 +02:00
// DeviceStatistics returns statistics about each device
2014-09-28 12:00:38 +01:00
func ( m * Model ) DeviceStatistics ( ) map [ string ] stats . DeviceStatistics {
2016-12-06 08:54:04 +00:00
res := make ( map [ string ] stats . DeviceStatistics )
2014-10-06 09:25:45 +02:00
for id := range m . cfg . Devices ( ) {
res [ id . String ( ) ] = m . deviceStatRef ( id ) . GetStatistics ( )
2014-08-21 23:45:40 +01:00
}
return res
}
2015-04-28 22:32:10 +02:00
// FolderStatistics returns statistics about each folder
2014-12-07 20:21:12 +00:00
func ( m * Model ) FolderStatistics ( ) map [ string ] stats . FolderStatistics {
2016-12-06 08:54:04 +00:00
res := make ( map [ string ] stats . FolderStatistics )
2014-12-07 20:21:12 +00:00
for id := range m . cfg . Folders ( ) {
res [ id ] = m . folderStatRef ( id ) . GetStatistics ( )
}
return res
}
2016-08-12 06:41:43 +00:00
// FolderCompletion describes how complete a remote device's copy of a folder
// is: the completion percentage plus the outstanding byte/item/delete counts
// backing that number.
type FolderCompletion struct {
	CompletionPct float64
	NeedBytes     int64
	NeedItems     int64
	GlobalBytes   int64
	NeedDeletes   int64
}
2015-04-28 22:32:10 +02:00
// Completion returns the completion status, in percent, for the given device
// and folder.
func (m *Model) Completion(device protocol.DeviceID, folder string) FolderCompletion {
	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it
	}

	tot := rf.GlobalSize().Bytes
	if tot == 0 {
		// Folder is empty, so we have all of it
		return FolderCompletion{
			CompletionPct: 100,
		}
	}

	// NOTE(review): m.deviceDownloads[device] is nil for a device we have no
	// download state for — presumably GetBlockCounts is nil-receiver safe;
	// confirm in deviceDownloadState.
	m.pmut.RLock()
	counts := m.deviceDownloads[device].GetBlockCounts(folder)
	m.pmut.RUnlock()

	var need, items, fileNeed, downloaded, deletes int64
	rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
		ft := f.(db.FileInfoTruncated)

		// If the file is deleted, we account it only in the deleted column.
		if ft.Deleted {
			deletes++
			return true
		}

		// This might might be more than it really is, because some blocks can be of a smaller size.
		downloaded = int64(counts[ft.Name] * protocol.BlockSize)
		fileNeed = ft.FileSize() - downloaded
		if fileNeed < 0 {
			fileNeed = 0
		}
		need += fileNeed
		items++

		return true
	})

	needRatio := float64(need) / float64(tot)
	completionPct := 100 * (1 - needRatio)

	// If the completion is 100% but there are deletes we need to handle,
	// drop it down a notch. Hack for consumers that look only at the
	// percentage (our own GUI does the same calculation as here on its own
	// and needs the same fixup).
	if need == 0 && deletes > 0 {
		completionPct = 95 // chosen by fair dice roll
	}

	l.Debugf("%v Completion(%s, %q): %f (%d / %d = %f)", m, device, folder, completionPct, need, tot, needRatio)

	return FolderCompletion{
		CompletionPct: completionPct,
		NeedBytes:     need,
		NeedItems:     items,
		GlobalBytes:   tot,
		NeedDeletes:   deletes,
	}
}
2016-10-17 14:10:17 +02:00
func addSizeOfFile ( s * db . Counts , f db . FileIntf ) {
switch {
case f . IsDeleted ( ) :
s . Deleted ++
case f . IsDirectory ( ) :
s . Directories ++
case f . IsSymlink ( ) :
s . Symlinks ++
default :
s . Files ++
2013-12-30 09:30:29 -05:00
}
2016-10-17 14:10:17 +02:00
s . Bytes += f . FileSize ( )
2014-01-05 16:16:37 +01:00
}
2013-12-30 09:30:29 -05:00
2014-03-28 14:36:57 +01:00
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
2016-10-17 14:10:17 +02:00
func ( m * Model ) GlobalSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-10-17 14:10:17 +02:00
return rf . GlobalSize ( )
2014-03-29 18:53:48 +01:00
}
2016-10-17 14:10:17 +02:00
return db . Counts { }
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
// LocalSize returns the number of files, deleted files and total bytes for all
2014-09-28 12:00:38 +01:00
// files in the local folder.
2016-10-17 14:10:17 +02:00
func ( m * Model ) LocalSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-10-17 14:10:17 +02:00
return rf . LocalSize ( )
2014-03-29 18:53:48 +01:00
}
2016-10-17 14:10:17 +02:00
return db . Counts { }
2014-01-06 06:38:01 +01:00
}
2014-05-19 22:31:28 +02:00
// NeedSize returns the number and total size of currently needed files.
2016-10-17 14:10:17 +02:00
func ( m * Model ) NeedSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2016-10-17 14:10:17 +02:00
var result db . Counts
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-08-05 07:13:52 +00:00
cfg := m . folderCfgs [ folder ]
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
2017-11-11 19:18:17 +00:00
if cfg . IgnoreDelete && f . IsDeleted ( ) {
2016-08-05 07:13:52 +00:00
return true
}
2016-10-17 14:10:17 +02:00
addSizeOfFile ( & result , f )
2014-07-15 17:54:00 +02:00
return true
} )
}
2016-10-17 14:10:17 +02:00
result . Bytes -= m . progressEmitter . BytesCompleted ( folder )
l . Debugf ( "%v NeedSize(%q): %v" , m , folder , result )
return result
2013-12-23 12:12:44 -05:00
}
2015-04-28 22:32:10 +02:00
// NeedFolderFiles returns a paginated list of currently needed files split
// into three categories: in progress, queued, and to be queued on the next
// puller iteration. Pagination spans the three categories in that order.
func (m *Model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	rf, ok := m.folderFiles[folder]
	if !ok {
		return nil, nil, nil
	}

	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	// skip/get implement pagination across all three categories: skip the
	// first (page-1)*perpage entries, then take up to perpage entries.
	skip := (page - 1) * perpage
	get := perpage

	runner, ok := m.folderRunners[folder]
	if ok {
		allProgressNames, allQueuedNames := runner.Jobs()

		var progressNames, queuedNames []string
		progressNames, skip, get = getChunk(allProgressNames, skip, get)
		queuedNames, skip, get = getChunk(allQueuedNames, skip, get)

		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		// Remember which names are already reported so the "rest" pass
		// below doesn't report them again.
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))

		for i, name := range progressNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}

		for i, name := range queuedNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}
	}

	rest = make([]db.FileInfoTruncated, 0, perpage)
	cfg := m.folderCfgs[folder]
	rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
		if cfg.IgnoreDelete && f.IsDeleted() {
			return true
		}
		if skip > 0 {
			skip--
			return true
		}
		if get > 0 {
			ft := f.(db.FileInfoTruncated)
			if _, ok := seen[ft.Name]; !ok {
				rest = append(rest, ft)
				get--
			}
		}
		// Stop iterating once the page is full.
		return get > 0
	})

	return progress, queued, rest
}
// RemoteNeedFolderFiles returns a paginated list of files the given remote
// device still needs from the given folder. It returns an error if the
// device/folder combination is not valid and connected.
func (m *Model) RemoteNeedFolderFiles(device protocol.DeviceID, folder string, page, perpage int) ([]db.FileInfoTruncated, error) {
	m.fmut.RLock()
	m.pmut.RLock()
	if err := m.checkDeviceFolderConnectedLocked(device, folder); err != nil {
		m.pmut.RUnlock()
		m.fmut.RUnlock()
		return nil, err
	}
	rf := m.folderFiles[folder]
	m.pmut.RUnlock()
	m.fmut.RUnlock()

	files := make([]db.FileInfoTruncated, 0, perpage)
	// skip/get implement the pagination window.
	skip := (page - 1) * perpage
	get := perpage
	rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
		if skip > 0 {
			skip--
			return true
		}
		if get > 0 {
			files = append(files, f.(db.FileInfoTruncated))
			get--
		}
		// Stop iterating once the page is full.
		return get > 0
	})

	return files, nil
}
2014-09-28 12:00:38 +01:00
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	// update=false: handleIndex drops any previously known index data for
	// this device before applying the new one.
	m.handleIndex(deviceID, folder, fs, false)
}
2014-09-28 12:00:38 +01:00
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	// update=true: existing index data for the device is kept and merged.
	m.handleIndex(deviceID, folder, fs, true)
}
// handleIndex is the common implementation behind Index and IndexUpdate.
// When update is false the device's previous index data is dropped before
// the new data is applied; when true the data is merged incrementally.
func (m *Model) handleIndex(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo, update bool) {
	op := "Index"
	if update {
		op += " update"
	}

	l.Debugf("%v (in): %s / %q: %d files", op, deviceID, folder, len(fs))

	if !m.folderSharedWith(folder, deviceID) {
		l.Debugf("%v for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", op, folder, deviceID)
		return
	}

	m.fmut.RLock()
	files, existing := m.folderFiles[folder]
	runner, running := m.folderRunners[folder]
	m.fmut.RUnlock()

	// A shared folder must always have a file set; anything else is a
	// programming error.
	if !existing {
		l.Fatalf("%v for nonexistent folder %q", op, folder)
	}

	if running {
		// Kick the puller once the new index data is committed below.
		defer runner.SchedulePull()
	} else if update {
		// Runner may legitimately not be set if this is the "cleanup" Index
		// message at startup (update == false). An IndexUpdate for a folder
		// with no runner, however, is a bug.
		l.Fatalf("%v for not running folder %q", op, folder)
	}

	// Forget download-progress state for the blocks covered by this message.
	m.pmut.RLock()
	m.deviceDownloads[deviceID].Update(folder, makeForgetUpdate(fs))
	m.pmut.RUnlock()

	if !update {
		files.Drop(deviceID)
	}
	files.Update(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.Sequence(deviceID),
	})
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) folderSharedWith ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2016-12-06 08:54:04 +00:00
shared := m . folderSharedWithLocked ( folder , deviceID )
m . fmut . RUnlock ( )
return shared
2016-01-01 20:11:12 +01:00
}
2016-08-07 16:21:59 +00:00
func ( m * Model ) folderSharedWithLocked ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:00:38 +01:00
for _ , nfolder := range m . deviceFolders [ deviceID ] {
if nfolder == folder {
2014-06-06 21:48:29 +02:00
return true
}
}
return false
}
2016-07-04 10:40:29 +00:00
// ClusterConfig handles a ClusterConfig message received from a connected
// device: it validates the announced folders against our configuration,
// decides whether full or delta index data must be sent per folder, handles
// auto-accept and introductions, and records which remote folders are paused.
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfig) {
	// Check the peer device's announced folders against our own. Emits events
	// for folders that we don't expect (unknown or not shared).
	// Also, collect a list of folders we do share, and if he's interested in
	// temporary indexes, subscribe the connection.

	tempIndexFolders := make([]string, 0, len(cm.Folders))

	m.pmut.RLock()
	conn, ok := m.conn[deviceID]
	hello := m.helloMessages[deviceID]
	m.pmut.RUnlock()
	if !ok {
		panic("bug: ClusterConfig called on closed or nonexistent connection")
	}

	dbLocation := filepath.Dir(m.db.Location())
	changed := false
	deviceCfg := m.cfg.Devices()[deviceID]

	// See issue #3802 - in short, we can't send modern symlink entries to older
	// clients.
	dropSymlinks := false
	if hello.ClientName == m.clientName && upgrade.CompareVersions(hello.ClientVersion, "v0.14.14") < 0 {
		l.Warnln("Not sending symlinks to old client", deviceID, "- please upgrade to v0.14.14 or newer")
		dropSymlinks = true
	}

	// Needs to happen outside of the fmut, as can cause CommitConfiguration
	if deviceCfg.AutoAcceptFolders {
		for _, folder := range cm.Folders {
			changed = m.handleAutoAccepts(deviceCfg, folder) || changed
		}
	}

	m.fmut.Lock()
	var paused []string
	for _, folder := range cm.Folders {
		if folder.Paused {
			// The remote has this folder paused; remember that and skip it.
			paused = append(paused, folder.ID)
			continue
		}

		if cfg, ok := m.cfg.Folder(folder.ID); ok && cfg.Paused {
			// Paused locally; nothing to exchange.
			continue
		}

		if m.cfg.IgnoredFolder(folder.ID) {
			l.Infof("Ignoring folder %s from device %s since we are configured to", folder.Description(), deviceID)
			continue
		}

		if !m.folderSharedWithLocked(folder.ID, deviceID) {
			events.Default.Log(events.FolderRejected, map[string]string{
				"folder":      folder.ID,
				"folderLabel": folder.Label,
				"device":      deviceID.String(),
			})
			l.Infof("Unexpected folder %s sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder.Description(), deviceID)
			continue
		}

		if !folder.DisableTempIndexes {
			tempIndexFolders = append(tempIndexFolders, folder.ID)
		}

		fs := m.folderFiles[folder.ID]
		myIndexID := fs.IndexID(protocol.LocalDeviceID)
		mySequence := fs.Sequence(protocol.LocalDeviceID)
		var startSequence int64

		for _, dev := range folder.Devices {
			if dev.ID == m.id {
				// This is the other side's description of what it knows
				// about us. Lets check to see if we can start sending index
				// updates directly or need to send the index from start...

				if dev.IndexID == myIndexID {
					// They say they've seen our index ID before, so we can
					// send a delta update only.

					if dev.MaxSequence > mySequence {
						// Safety check. They claim to have more or newer
						// index data than we have - either we have lost
						// index data, or reset the index without resetting
						// the IndexID, or something else weird has
						// happened. We send a full index to reset the
						// situation.
						l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", deviceID, folder.Description())
						startSequence = 0
						continue
					}

					l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", deviceID, folder.Description(), dev.MaxSequence)
					startSequence = dev.MaxSequence
				} else if dev.IndexID != 0 {
					// They say they've seen an index ID from us, but it's
					// not the right one. Either they are confused or we
					// must have reset our database since last talking to
					// them. We'll start with a full index transfer.
					l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", deviceID, folder.Description(), dev.IndexID, myIndexID)
					startSequence = 0
				}
			} else if dev.ID == deviceID && dev.IndexID != 0 {
				// This is the other side's description of themselves. We
				// check to see that it matches the IndexID we have on file,
				// otherwise we drop our old index data and expect to get a
				// completely new set.

				theirIndexID := fs.IndexID(deviceID)
				// NOTE(review): the dev.IndexID == 0 branch below is
				// unreachable — the enclosing else-if already requires
				// dev.IndexID != 0. Dead code kept as-is; worth removing
				// in a separate change.
				if dev.IndexID == 0 {
					// They're not announcing an index ID. This means they
					// do not support delta indexes and we should clear any
					// information we have from them before accepting their
					// index, which will presumably be a full index.
					fs.Drop(deviceID)
				} else if dev.IndexID != theirIndexID {
					// The index ID we have on file is not what they're
					// announcing. They must have reset their database and
					// will probably send us a full index. We drop any
					// information we have and remember this new index ID
					// instead.
					l.Infof("Device %v folder %s has a new index ID (%v)", deviceID, folder.Description(), dev.IndexID)
					fs.Drop(deviceID)
					fs.SetIndexID(deviceID, dev.IndexID)
				} else {
					// They're sending a recognized index ID and will most
					// likely use delta indexes. We might already have files
					// that we need to pull so let the folder runner know
					// that it should recheck the index data.
					if runner := m.folderRunners[folder.ID]; runner != nil {
						defer runner.SchedulePull()
					}
				}
			}
		}

		go sendIndexes(conn, folder.ID, fs, m.folderIgnores[folder.ID], startSequence, dbLocation, dropSymlinks)
	}

	m.pmut.Lock()
	m.remotePausedFolders[deviceID] = paused
	m.pmut.Unlock()

	// This breaks if we send multiple CM messages during the same connection.
	if len(tempIndexFolders) > 0 {
		m.pmut.RLock()
		conn, ok := m.conn[deviceID]
		m.pmut.RUnlock()
		// In case we've got ClusterConfig, and the connection disappeared
		// from infront of our nose.
		if ok {
			m.progressEmitter.temporaryIndexSubscribe(conn, tempIndexFolders)
		}
	}

	if deviceCfg.Introducer {
		foldersDevices, introduced := m.handleIntroductions(deviceCfg, cm)
		if introduced {
			changed = true
		}
		// If permitted, check if the introducer has unshared devices/folders
		// with some of the devices/folders that we know were introduced to us
		// by him.
		if !deviceCfg.SkipIntroductionRemovals && m.handleDeintroductions(deviceCfg, cm, foldersDevices) {
			changed = true
		}
	}

	m.fmut.Unlock()

	if changed {
		if err := m.cfg.Save(); err != nil {
			l.Warnln("Failed to save config", err)
		}
	}
}
2014-09-23 16:04:20 +02:00
2016-11-07 16:40:48 +00:00
// handleIntroductions handles adding devices/shares that are shared by an introducer device
func ( m * Model ) handleIntroductions ( introducerCfg config . DeviceConfiguration , cm protocol . ClusterConfig ) ( folderDeviceSet , bool ) {
// This device is an introducer. Go through the announced lists of folders
// and devices and add what we are missing, remove what we have extra that
// has been introducer by the introducer.
changed := false
2014-09-23 16:04:20 +02:00
2016-11-07 16:40:48 +00:00
foldersDevices := make ( folderDeviceSet )
2014-09-23 16:04:20 +02:00
2016-11-07 16:40:48 +00:00
for _ , folder := range cm . Folders {
// Adds devices which we do not have, but the introducer has
// for the folders that we have in common. Also, shares folders
// with devices that we have in common, yet are currently not sharing
// the folder.
2018-01-03 07:42:25 +00:00
fcfg , ok := m . cfg . Folder ( folder . ID )
if ! ok {
// Don't have this folder, carry on.
continue
}
2016-11-07 16:40:48 +00:00
nextDevice :
for _ , device := range folder . Devices {
2017-12-07 07:08:24 +00:00
// No need to share with self.
if device . ID == m . id {
continue
}
2016-11-07 16:40:48 +00:00
foldersDevices . set ( device . ID , folder . ID )
2016-11-07 16:40:48 +00:00
2016-11-07 16:40:48 +00:00
if _ , ok := m . cfg . Devices ( ) [ device . ID ] ; ! ok {
// The device is currently unknown. Add it to the config.
m . introduceDevice ( device , introducerCfg )
changed = true
2018-01-03 07:42:25 +00:00
} else {
for _ , dev := range fcfg . DeviceIDs ( ) {
if dev == device . ID {
// We already share the folder with this device, so
// nothing to do.
continue nextDevice
}
2016-11-07 16:40:48 +00:00
}
2016-11-07 16:40:48 +00:00
}
2016-11-07 16:40:48 +00:00
2016-11-07 16:40:48 +00:00
// We don't yet share this folder with this device. Add the device
// to sharing list of the folder.
2017-12-07 07:08:24 +00:00
l . Infof ( "Sharing folder %s with %v (vouched for by introducer %v)" , folder . Description ( ) , device . ID , introducerCfg . DeviceID )
2018-01-03 07:42:25 +00:00
fcfg . Devices = append ( fcfg . Devices , config . FolderDeviceConfiguration {
DeviceID : device . ID ,
IntroducedBy : introducerCfg . DeviceID ,
} )
2016-11-07 16:40:48 +00:00
changed = true
}
2018-01-03 07:42:25 +00:00
if changed {
m . cfg . SetFolder ( fcfg )
}
2016-11-07 16:40:48 +00:00
}
2016-11-07 16:40:48 +00:00
2016-11-07 16:40:48 +00:00
return foldersDevices , changed
}
2016-11-07 16:40:48 +00:00
2017-12-07 07:08:24 +00:00
// handleDeintroductions handles removals of devices/shares that are removed
// by an introducer device: folder shares and devices that this introducer
// vouched for, but which no longer appear in its announced cluster config,
// are unshared/removed. Returns whether the configuration was changed. The
// caller (ClusterConfig) holds fmut.
func (m *Model) handleDeintroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig, foldersDevices folderDeviceSet) bool {
	changed := false
	foldersIntroducedByOthers := make(folderDeviceSet)

	// Check if we should unshare some folders, if the introducer has unshared them.
	for _, folderCfg := range m.cfg.Folders() {
		folderChanged := false
		// Manual index loop: entries may be deleted in place, in which case
		// i is decremented to revisit the shifted element.
		for i := 0; i < len(folderCfg.Devices); i++ {
			if folderCfg.Devices[i].IntroducedBy == introducerCfg.DeviceID {
				if !foldersDevices.has(folderCfg.Devices[i].DeviceID, folderCfg.ID) {
					// We could not find that folder shared on the
					// introducer with the device that was introduced to us.
					// We should follow and unshare as well.
					l.Infof("Unsharing folder %s with %v as introducer %v no longer shares the folder with that device", folderCfg.Description(), folderCfg.Devices[i].DeviceID, folderCfg.Devices[i].IntroducedBy)
					folderCfg.Devices = append(folderCfg.Devices[:i], folderCfg.Devices[i+1:]...)
					i--
					folderChanged = true
				}
			} else {
				// Introduced by someone else (or not introduced at all);
				// remember it so we don't remove the device below.
				foldersIntroducedByOthers.set(folderCfg.Devices[i].DeviceID, folderCfg.ID)
			}
		}

		// We've modified the folder, hence update it.
		if folderChanged {
			m.cfg.SetFolder(folderCfg)
			changed = true
		}
	}

	// Check if we should remove some devices, if the introducer no longer
	// shares any folder with them. Yet do not remove if we share other
	// folders that haven't been introduced by the introducer.
	for _, device := range m.cfg.Devices() {
		if device.IntroducedBy == introducerCfg.DeviceID {
			if !foldersDevices.hasDevice(device.DeviceID) {
				if foldersIntroducedByOthers.hasDevice(device.DeviceID) {
					l.Infof("Would have removed %v as %v no longer shares any folders, yet there are other folders that are shared with this device that haven't been introduced by this introducer.", device.DeviceID, device.IntroducedBy)
					continue
				}
				// The introducer no longer shares any folder with the
				// device, remove the device.
				l.Infof("Removing device %v as introducer %v no longer shares any folders with that device", device.DeviceID, device.IntroducedBy)
				m.cfg.RemoveDevice(device.DeviceID)
				changed = true
			}
		}
	}

	return changed
}
2017-12-07 07:08:24 +00:00
// handleAutoAccepts handles adding and sharing folders for devices that have
// AutoAcceptFolders set to true.
func ( m * Model ) handleAutoAccepts ( deviceCfg config . DeviceConfiguration , folder protocol . Folder ) bool {
2018-01-03 07:42:25 +00:00
if cfg , ok := m . cfg . Folder ( folder . ID ) ; ! ok {
2017-12-07 07:08:24 +00:00
defaultPath := m . cfg . Options ( ) . DefaultFolderPath
defaultPathFs := fs . NewFilesystem ( fs . FilesystemTypeBasic , defaultPath )
for _ , path := range [ ] string { folder . Label , folder . ID } {
if _ , err := defaultPathFs . Lstat ( path ) ; ! fs . IsNotExist ( err ) {
continue
}
fcfg := config . NewFolderConfiguration ( m . id , folder . ID , folder . Label , fs . FilesystemTypeBasic , filepath . Join ( defaultPath , path ) )
2018-01-03 07:42:25 +00:00
fcfg . Devices = append ( fcfg . Devices , config . FolderDeviceConfiguration {
DeviceID : deviceCfg . DeviceID ,
} )
2017-12-07 07:08:24 +00:00
// Need to wait for the waiter, as this calls CommitConfiguration,
// which sets up the folder and as we return from this call,
// ClusterConfig starts poking at m.folderFiles and other things
// that might not exist until the config is committed.
w , _ := m . cfg . SetFolder ( fcfg )
w . Wait ( )
l . Infof ( "Auto-accepted %s folder %s at path %s" , deviceCfg . DeviceID , folder . Description ( ) , fcfg . Path )
return true
}
l . Infof ( "Failed to auto-accept folder %s from %s due to path conflict" , folder . Description ( ) , deviceCfg . DeviceID )
return false
2018-01-03 07:42:25 +00:00
} else {
for _ , device := range cfg . DeviceIDs ( ) {
if device == deviceCfg . DeviceID {
// Already shared nothing todo.
return false
}
}
cfg . Devices = append ( cfg . Devices , config . FolderDeviceConfiguration {
DeviceID : deviceCfg . DeviceID ,
} )
w , _ := m . cfg . SetFolder ( cfg )
w . Wait ( )
l . Infof ( "Shared %s with %s due to auto-accept" , folder . ID , deviceCfg . DeviceID )
return true
2017-12-07 07:08:24 +00:00
}
}
2016-11-07 16:40:48 +00:00
func ( m * Model ) introduceDevice ( device protocol . Device , introducerCfg config . DeviceConfiguration ) {
addresses := [ ] string { "dynamic" }
for _ , addr := range device . Addresses {
if addr != "dynamic" {
addresses = append ( addresses , addr )
}
}
l . Infof ( "Adding device %v to config (vouched for by introducer %v)" , device . ID , introducerCfg . DeviceID )
newDeviceCfg := config . DeviceConfiguration {
DeviceID : device . ID ,
Name : device . Name ,
Compression : introducerCfg . Compression ,
Addresses : addresses ,
CertName : device . CertName ,
IntroducedBy : introducerCfg . DeviceID ,
}
// The introducers' introducers are also our introducers.
if device . Introducer {
l . Infof ( "Device %v is now also an introducer" , device . ID )
newDeviceCfg . Introducer = true
newDeviceCfg . SkipIntroductionRemovals = device . SkipIntroductionRemovals
2014-09-23 16:04:20 +02:00
}
2016-11-07 16:40:48 +00:00
m . cfg . SetDevice ( newDeviceCfg )
}
2016-08-10 09:37:32 +00:00
// Closed is called when a connection has been closed
func (m *Model) Closed(conn protocol.Connection, err error) {
	device := conn.ID()

	m.pmut.Lock()
	// Note: this reassigns the conn parameter to the connection we actually
	// have on record for the device.
	conn, ok := m.conn[device]
	if ok {
		m.progressEmitter.temporaryIndexUnsubscribe(conn)
	}
	// Remove all per-connection state for this device.
	delete(m.conn, device)
	delete(m.helloMessages, device)
	delete(m.deviceDownloads, device)
	delete(m.remotePausedFolders, device)
	closed := m.closed[device]
	delete(m.closed, device)
	m.pmut.Unlock()

	l.Infof("Connection to %s at %s closed: %v", device, conn.Name(), err)
	events.Default.Log(events.DeviceDisconnected, map[string]string{
		"id":    device.String(),
		"error": err.Error(),
	})
	// Signal anyone waiting (e.g. AddConnection replacing an old connection)
	// that teardown for this device is complete. NOTE(review): if Closed were
	// ever called for a device never registered via AddConnection, closed
	// would be nil and close(closed) would panic — confirm callers guarantee
	// this can't happen.
	close(closed)
}
2016-12-21 18:41:25 +00:00
// close will close the underlying connection for a given device
func (m *Model) close(device protocol.DeviceID) {
	m.pmut.Lock()
	defer m.pmut.Unlock()
	m.closeLocked(device)
}
// closeLocked will close the underlying connection for a given device. The
// caller must hold pmut.
func (m *Model) closeLocked(device protocol.DeviceID) {
	// If there is no connection on record there is nothing to close.
	if conn, ok := m.conn[device]; ok {
		closeRawConn(conn)
	}
}
2014-01-06 11:11:18 +01:00
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error {
	if offset < 0 {
		return protocol.ErrInvalid
	}

	if !m.folderSharedWith(folder, deviceID) {
		l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder)
		return protocol.ErrNoSuchFile
	}

	if deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d t=%v", m, deviceID, folder, name, offset, len(buf), fromTemporary)
	}

	m.fmut.RLock()
	folderCfg := m.folderCfgs[folder]
	folderIgnores := m.folderIgnores[folder]
	m.fmut.RUnlock()

	folderFs := folderCfg.Filesystem()

	// Refuse requests for files we would never serve: internal bookkeeping
	// files, ignored files, and names whose parent path traverses a symlink
	// (checked below).
	if fs.IsInternal(name) {
		l.Debugf("%v REQ(in) for internal file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
		return protocol.ErrNoSuchFile
	}

	if folderIgnores.Match(name).IsIgnored() {
		l.Debugf("%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
		return protocol.ErrNoSuchFile
	}

	if err := osutil.TraversesSymlink(folderFs, filepath.Dir(name)); err != nil {
		l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, len(buf))
		return protocol.ErrNoSuchFile
	}

	// Only check temp files if the flag is set, and if we are set to advertise
	// the temp indexes.
	if fromTemporary && !folderCfg.DisableTempIndexes {
		tempFn := fs.TempName(name)

		if info, err := folderFs.Lstat(tempFn); err != nil || !info.IsRegular() {
			// Reject reads for anything that doesn't exist or is something
			// other than a regular file.
			return protocol.ErrNoSuchFile
		}

		if err := readOffsetIntoBuf(folderFs, tempFn, offset, buf); err == nil {
			return nil
		}
		// Fall through to reading from a non-temp file, just incase the temp
		// file has finished downloading.
	}

	if info, err := folderFs.Lstat(name); err != nil || !info.IsRegular() {
		// Reject reads for anything that doesn't exist or is something
		// other than a regular file.
		return protocol.ErrNoSuchFile
	}

	err := readOffsetIntoBuf(folderFs, name, offset, buf)
	if fs.IsNotExist(err) {
		return protocol.ErrNoSuchFile
	} else if err != nil {
		return protocol.ErrGeneric
	}
	return nil
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentFolderFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
2016-12-06 08:54:04 +00:00
return fs . Get ( protocol . LocalDeviceID , file )
2014-04-01 23:18:32 +02:00
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentGlobalFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
2016-12-06 08:54:04 +00:00
return fs . GetGlobal ( file )
2014-04-01 23:18:32 +02:00
}
2018-02-14 08:59:46 +01:00
// cFiler looks up current files in the model, scoped to a single folder.
// It is used as a scanner.CurrentFiler implementation (see CurrentFile).
type cFiler struct {
	m *Model // model to query
	r string // folder ID the lookups are scoped to
}
2018-02-14 08:59:46 +01:00
// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	// Delegate to the model, scoped to the folder held in cf.r.
	return cf.m.CurrentFolderFile(cf.r, file)
}
2018-02-24 08:51:29 +01:00
// Connection returns the current connection for device, and a boolean whether a connection was found.
2017-11-21 07:25:38 +00:00
func ( m * Model ) Connection ( deviceID protocol . DeviceID ) ( connections . Connection , bool ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2017-11-21 07:25:38 +00:00
cn , ok := m . conn [ deviceID ]
2014-09-20 19:14:45 +02:00
m . pmut . RUnlock ( )
2014-09-10 11:29:01 +02:00
if ok {
2014-09-28 12:00:38 +01:00
m . deviceWasSeen ( deviceID )
2014-09-10 11:29:01 +02:00
}
2017-11-21 07:25:38 +00:00
return cn , ok
2014-01-06 11:11:18 +01:00
}
2014-11-08 22:12:18 +01:00
// GetIgnores returns the raw ignore lines and the expanded patterns for the
// given folder. The folder may exist in the configuration without having been
// started yet; in that case the patterns are loaded from disk.
func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	cfg, ok := m.folderCfgs[folder]
	if !ok {
		// Not a running folder; fall back to the raw configuration.
		cfg, ok = m.cfg.Folders()[folder]
		if !ok {
			return nil, nil, fmt.Errorf("Folder %s does not exist", folder)
		}
	}

	// On creation a new folder with ignore patterns validly has no marker yet.
	if err := cfg.CheckPath(); err != nil && err != config.ErrMarkerMissing {
		return nil, nil, err
	}

	// Prefer the already-loaded matcher for running folders.
	ignores, ok := m.folderIgnores[folder]
	if ok {
		return ignores.Lines(), ignores.Patterns(), nil
	}

	// Otherwise load .stignore from disk; a missing file is not an error.
	ignores = ignore.New(fs.NewFilesystem(cfg.FilesystemType, cfg.Path))
	if err := ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
		return nil, nil, err
	}

	return ignores.Lines(), ignores.Patterns(), nil
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) SetIgnores ( folder string , content [ ] string ) error {
2017-04-01 09:58:06 +00:00
cfg , ok := m . cfg . Folders ( ) [ folder ]
2014-09-14 23:03:53 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return fmt . Errorf ( "Folder %s does not exist" , folder )
2014-09-14 23:03:53 +01:00
}
2017-08-19 14:36:56 +00:00
if err := ignore . WriteIgnores ( cfg . Filesystem ( ) , ".stignore" , content ) ; err != nil {
2014-09-14 23:03:53 +01:00
l . Warnln ( "Saving .stignore:" , err )
return err
}
2017-04-01 09:58:06 +00:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ok {
return runner . Scan ( nil )
2014-09-14 23:03:53 +01:00
}
2017-04-01 09:58:06 +00:00
return nil
2014-09-14 23:03:53 +01:00
}
2016-03-25 20:29:07 +00:00
// OnHello is called when an device connects to us.
// This allows us to extract some information from the Hello message
// and add it to a list of known devices ahead of any checks.
2016-08-05 09:29:49 +00:00
func ( m * Model ) OnHello ( remoteID protocol . DeviceID , addr net . Addr , hello protocol . HelloResult ) error {
if m . cfg . IgnoredDevice ( remoteID ) {
return errDeviceIgnored
}
2017-04-01 09:52:31 +00:00
cfg , ok := m . cfg . Device ( remoteID )
if ! ok {
events . Default . Log ( events . DeviceRejected , map [ string ] string {
"name" : hello . DeviceName ,
"device" : remoteID . String ( ) ,
"address" : addr . String ( ) ,
} )
return errDeviceUnknown
2016-03-25 20:29:07 +00:00
}
2016-08-05 09:29:49 +00:00
2017-04-01 09:52:31 +00:00
if cfg . Paused {
return errDevicePaused
}
2016-08-05 09:29:49 +00:00
2017-04-01 09:52:31 +00:00
if len ( cfg . AllowedNetworks ) > 0 {
if ! connections . IsAllowedNetwork ( addr . String ( ) , cfg . AllowedNetworks ) {
return errNetworkNotAllowed
}
}
return nil
2016-03-25 20:29:07 +00:00
}
// GetHello is called when we are about to connect to some remote device.
2017-05-22 19:58:33 +00:00
func ( m * Model ) GetHello ( id protocol . DeviceID ) protocol . HelloIntf {
name := ""
if _ , ok := m . cfg . Device ( id ) ; ok {
name = m . cfg . MyName ( )
}
2016-07-04 10:40:29 +00:00
return & protocol . Hello {
2017-05-22 19:58:33 +00:00
DeviceName : name ,
2016-03-25 20:29:07 +00:00
ClientName : m . clientName ,
ClientVersion : m . clientVersion ,
}
}
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloResult) {
	deviceID := conn.ID()

	m.pmut.Lock()
	if oldConn, ok := m.conn[deviceID]; ok {
		l.Infoln("Replacing old connection", oldConn, "with", conn, "for", deviceID)
		// There is an existing connection to this device that we are
		// replacing. We must close the existing connection and wait for the
		// close to complete before adding the new connection. We do the
		// actual close without holding pmut as the connection will call
		// back into Closed() for the cleanup.
		closed := m.closed[deviceID]
		m.pmut.Unlock()
		closeRawConn(oldConn)
		// Wait for Closed() to have run (it closes this channel) before we
		// re-acquire pmut and register the replacement connection.
		<-closed
		m.pmut.Lock()
	}

	// Register connection bookkeeping: the connection itself, a fresh
	// "closed" signal channel, per-device download state and the hello we
	// received.
	m.conn[deviceID] = conn
	m.closed[deviceID] = make(chan struct{})
	m.deviceDownloads[deviceID] = newDeviceDownloadState()

	m.helloMessages[deviceID] = hello

	event := map[string]string{
		"id":            deviceID.String(),
		"deviceName":    hello.DeviceName,
		"clientName":    hello.ClientName,
		"clientVersion": hello.ClientVersion,
		"type":          conn.Type(),
	}

	// The remote address may be unavailable for some connection types.
	addr := conn.RemoteAddr()
	if addr != nil {
		event["addr"] = addr.String()
	}

	events.Default.Log(events.DeviceConnected, event)

	l.Infof(`Device %s client is "%s %s" named "%s" at %s`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName, conn)

	conn.Start()
	m.pmut.Unlock()

	// Acquires fmut, so has to be done outside of pmut.
	cm := m.generateClusterConfig(deviceID)
	conn.ClusterConfig(cm)

	// Adopt the peer's advertised name if we have none for it, or if the
	// configuration says remote names should win.
	device, ok := m.cfg.Devices()[deviceID]
	if ok && (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" {
		device.Name = hello.DeviceName
		m.cfg.SetDevice(device)
		m.cfg.Save()
	}

	m.deviceWasSeen(deviceID)
}
2016-07-04 10:40:29 +00:00
func ( m * Model ) DownloadProgress ( device protocol . DeviceID , folder string , updates [ ] protocol . FileDownloadProgressUpdate ) {
2016-04-15 10:59:41 +00:00
if ! m . folderSharedWith ( folder , device ) {
return
}
m . fmut . RLock ( )
cfg , ok := m . folderCfgs [ folder ]
m . fmut . RUnlock ( )
2016-12-16 22:23:35 +00:00
if ! ok || cfg . Type == config . FolderTypeSendOnly || cfg . DisableTempIndexes {
2016-04-15 10:59:41 +00:00
return
}
m . pmut . RLock ( )
m . deviceDownloads [ device ] . Update ( folder , updates )
2016-05-26 06:53:27 +00:00
state := m . deviceDownloads [ device ] . GetBlockCounts ( folder )
2016-04-15 10:59:41 +00:00
m . pmut . RUnlock ( )
2016-05-22 07:52:08 +00:00
events . Default . Log ( events . RemoteDownloadProgress , map [ string ] interface { } {
"device" : device . String ( ) ,
"folder" : folder ,
2016-05-26 06:53:27 +00:00
"state" : state ,
2016-05-22 07:52:08 +00:00
} )
2016-04-15 10:59:41 +00:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) deviceStatRef ( deviceID protocol . DeviceID ) * stats . DeviceStatisticsReference {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-09-20 19:14:45 +02:00
2014-09-28 12:00:38 +01:00
if sr , ok := m . deviceStatRefs [ deviceID ] ; ok {
2014-09-20 19:14:45 +02:00
return sr
}
2014-12-08 16:36:15 +01:00
2015-09-04 13:22:59 +02:00
sr := stats . NewDeviceStatisticsReference ( m . db , deviceID . String ( ) )
2014-12-08 16:36:15 +01:00
m . deviceStatRefs [ deviceID ] = sr
return sr
2014-09-20 19:14:45 +02:00
}
// deviceWasSeen records the current time as the last-seen timestamp for
// the given device in its statistics reference.
func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
	m.deviceStatRef(deviceID).WasSeen()
}
2014-12-07 20:21:12 +00:00
func ( m * Model ) folderStatRef ( folder string ) * stats . FolderStatisticsReference {
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-12-16 23:33:28 +01:00
sr , ok := m . folderStatRefs [ folder ]
if ! ok {
2014-12-07 20:21:12 +00:00
sr = stats . NewFolderStatisticsReference ( m . db , folder )
m . folderStatRefs [ folder ] = sr
}
2014-12-16 23:33:28 +01:00
return sr
2014-12-07 20:21:12 +00:00
}
// receivedFile records in the folder statistics that the named file was
// just received (or deleted) by the puller.
func (m *Model) receivedFile(folder string, file protocol.FileInfo) {
	m.folderStatRef(folder).ReceivedFile(file.Name, file.IsDeleted())
}
// sendIndexes sends the local index for the given folder to the connected
// device, starting at startSequence, and then keeps sending incremental
// index updates until the connection closes or a send fails.
func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher, startSequence int64, dbLocation string, dropSymlinks bool) {
	deviceID := conn.ID()
	// err is captured by the deferred Debugf below so the exit log shows
	// why we stopped.
	var err error

	l.Debugf("Starting sendIndexes for %s to %s at %s (slv=%d)", folder, deviceID, conn, startSequence)
	defer l.Debugf("Exiting sendIndexes for %s to %s at %s: %v", folder, deviceID, conn, err)

	// Send everything newer than startSequence; minSequence becomes the
	// highest sequence we have sent so far.
	minSequence, err := sendIndexTo(startSequence, conn, folder, fs, ignores, dbLocation, dropSymlinks)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := events.Default.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer events.Default.Unsubscribe(sub)

	// Loop until a send fails; err != nil terminates the loop.
	for err == nil {
		if conn.Closed() {
			// Our work is done.
			return
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if fs.Sequence(protocol.LocalDeviceID) <= minSequence {
			sub.Poll(time.Minute)
			continue
		}

		minSequence, err = sendIndexTo(minSequence, conn, folder, fs, ignores, dbLocation, dropSymlinks)

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}
}
2014-07-15 13:04:37 +02:00
2016-12-17 19:48:33 +00:00
func sendIndexTo ( minSequence int64 , conn protocol . Connection , folder string , fs * db . FileSet , ignores * ignore . Matcher , dbLocation string , dropSymlinks bool ) ( int64 , error ) {
2014-09-28 12:00:38 +01:00
deviceID := conn . ID ( )
2017-04-22 14:23:33 +00:00
batch := make ( [ ] protocol . FileInfo , 0 , maxBatchSizeFiles )
batchSizeBytes := 0
2016-07-29 19:54:24 +00:00
initial := minSequence == 0
maxSequence := minSequence
2014-07-30 20:08:04 +02:00
var err error
2018-01-12 11:27:55 +00:00
debugMsg := func ( t string ) string {
return fmt . Sprintf ( "Sending indexes for %s to %s at %s: %d files (<%d bytes) (%s)" , folder , deviceID , conn , len ( batch ) , batchSizeBytes , t )
}
2014-07-15 13:04:37 +02:00
2016-07-27 21:38:43 +00:00
sorter := NewIndexSorter ( dbLocation )
2016-07-21 17:21:15 +00:00
defer sorter . Close ( )
2015-01-12 14:50:30 +01:00
fs . WithHave ( protocol . LocalDeviceID , func ( fi db . FileIntf ) bool {
2014-08-12 13:53:31 +02:00
f := fi . ( protocol . FileInfo )
2016-07-29 19:54:24 +00:00
if f . Sequence <= minSequence {
2014-07-30 20:08:04 +02:00
return true
}
2014-07-15 13:04:37 +02:00
2016-07-29 19:54:24 +00:00
if f . Sequence > maxSequence {
maxSequence = f . Sequence
2014-07-30 20:08:04 +02:00
}
2014-07-15 13:04:37 +02:00
2016-12-17 19:48:33 +00:00
if dropSymlinks && f . IsSymlink ( ) {
// Do not send index entries with symlinks to clients that can't
// handle it. Fixes issue #3802. Once both sides are upgraded, a
// rescan (i.e., change) of the symlink is required for it to
// sync again, due to delta indexes.
return true
}
2016-07-21 17:21:15 +00:00
sorter . Append ( f )
return true
} )
sorter . Sorted ( func ( f protocol . FileInfo ) bool {
2017-04-22 14:23:33 +00:00
if len ( batch ) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
2014-07-30 20:08:04 +02:00
if initial {
2016-07-04 10:40:29 +00:00
if err = conn . Index ( folder , batch ) ; err != nil {
2014-07-30 20:08:04 +02:00
return false
}
2018-01-12 11:27:55 +00:00
l . Debugln ( debugMsg ( "initial index" ) )
2014-07-30 20:08:04 +02:00
initial = false
} else {
2016-07-04 10:40:29 +00:00
if err = conn . IndexUpdate ( folder , batch ) ; err != nil {
2014-07-30 20:08:04 +02:00
return false
}
2018-01-12 11:27:55 +00:00
l . Debugln ( debugMsg ( "batched update" ) )
2014-07-03 12:30:10 +02:00
}
2014-01-06 11:11:18 +01:00
2017-04-22 14:23:33 +00:00
batch = make ( [ ] protocol . FileInfo , 0 , maxBatchSizeFiles )
batchSizeBytes = 0
2014-07-15 13:04:37 +02:00
}
2014-07-30 20:08:04 +02:00
batch = append ( batch , f )
2017-04-22 14:23:33 +00:00
batchSizeBytes += f . ProtoSize ( )
2014-07-30 20:08:04 +02:00
return true
} )
if initial && err == nil {
2016-07-04 10:40:29 +00:00
err = conn . Index ( folder , batch )
2015-10-03 17:25:21 +02:00
if err == nil {
2018-01-12 11:27:55 +00:00
l . Debugln ( debugMsg ( "small initial index" ) )
2014-07-30 20:08:04 +02:00
}
} else if len ( batch ) > 0 && err == nil {
2016-07-04 10:40:29 +00:00
err = conn . IndexUpdate ( folder , batch )
2015-10-03 17:25:21 +02:00
if err == nil {
2018-01-12 11:27:55 +00:00
l . Debugln ( debugMsg ( "last batch" ) )
2014-07-30 20:08:04 +02:00
}
2014-07-15 13:04:37 +02:00
}
2014-07-30 20:08:04 +02:00
2016-07-29 19:54:24 +00:00
return maxSequence , err
2014-01-06 11:11:18 +01:00
}
2016-05-19 00:19:26 +00:00
func ( m * Model ) updateLocalsFromScanning ( folder string , fs [ ] protocol . FileInfo ) {
2016-05-19 07:01:43 +00:00
m . updateLocals ( folder , fs )
m . fmut . RLock ( )
2016-09-28 15:54:13 +00:00
folderCfg := m . folderCfgs [ folder ]
2016-05-19 07:01:43 +00:00
m . fmut . RUnlock ( )
2016-12-21 16:35:20 +00:00
m . diskChangeDetected ( folderCfg , fs , events . LocalChangeDetected )
2016-05-19 00:19:26 +00:00
}
func ( m * Model ) updateLocalsFromPulling ( folder string , fs [ ] protocol . FileInfo ) {
2016-05-19 07:01:43 +00:00
m . updateLocals ( folder , fs )
2016-12-21 16:35:20 +00:00
m . fmut . RLock ( )
folderCfg := m . folderCfgs [ folder ]
m . fmut . RUnlock ( )
m . diskChangeDetected ( folderCfg , fs , events . RemoteChangeDetected )
2016-05-19 00:19:26 +00:00
}
2016-05-19 07:01:43 +00:00
func ( m * Model ) updateLocals ( folder string , fs [ ] protocol . FileInfo ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-06-16 08:30:15 +02:00
files := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2016-01-16 21:42:32 +01:00
if files == nil {
// The folder doesn't exist.
return
}
2015-06-16 08:30:15 +02:00
files . Update ( protocol . LocalDeviceID , fs )
2015-12-04 08:41:13 +01:00
filenames := make ( [ ] string , len ( fs ) )
for i , file := range fs {
filenames [ i ] = file . Name
}
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . LocalIndexUpdated , map [ string ] interface { } {
2015-12-04 08:41:13 +01:00
"folder" : folder ,
"items" : len ( fs ) ,
"filenames" : filenames ,
2016-07-29 19:54:24 +00:00
"version" : files . Sequence ( protocol . LocalDeviceID ) ,
2014-07-17 13:38:36 +02:00
} )
2016-05-19 00:19:26 +00:00
}
2016-12-21 16:35:20 +00:00
func ( m * Model ) diskChangeDetected ( folderCfg config . FolderConfiguration , files [ ] protocol . FileInfo , typeOfEvent events . EventType ) {
2016-05-19 00:19:26 +00:00
for _ , file := range files {
2018-03-25 21:12:50 +01:00
if file . IsInvalid ( ) {
continue
}
2016-05-19 00:19:26 +00:00
objType := "file"
2016-05-19 07:01:43 +00:00
action := "modified"
2016-05-19 00:19:26 +00:00
2017-11-11 19:18:17 +00:00
switch {
case file . IsDeleted ( ) :
action = "deleted"
2016-07-15 14:23:20 +00:00
// If our local vector is version 1 AND it is the only version
2016-07-04 10:40:29 +00:00
// vector so far seen for this file then it is a new file. Else if
// it is > 1 it's not new, and if it is 1 but another shortId
// version vector exists then it is new for us but created elsewhere
// so the file is still not new but modified by us. Only if it is
// truly new do we change this to 'added', else we leave it as
// 'modified'.
2017-11-11 19:18:17 +00:00
case len ( file . Version . Counters ) == 1 && file . Version . Counters [ 0 ] . Value == 1 :
2016-05-19 07:01:43 +00:00
action = "added"
2016-05-19 00:19:26 +00:00
}
2018-03-25 21:12:50 +01:00
if file . IsSymlink ( ) {
objType = "symlink"
} else if file . IsDirectory ( ) {
2016-05-19 00:19:26 +00:00
objType = "dir"
}
2016-12-21 16:35:20 +00:00
// Two different events can be fired here based on what EventType is passed into function
events . Default . Log ( typeOfEvent , map [ string ] string {
2017-05-30 08:57:18 +02:00
"folder" : folderCfg . ID ,
"folderID" : folderCfg . ID , // incorrect, deprecated, kept for historical compliance
2016-12-21 16:35:20 +00:00
"label" : folderCfg . Label ,
"action" : action ,
"type" : objType ,
2017-08-19 14:36:56 +00:00
"path" : filepath . FromSlash ( file . Name ) ,
2016-12-21 16:35:20 +00:00
"modifiedBy" : file . ModifiedBy . String ( ) ,
2016-05-19 00:19:26 +00:00
} )
}
2014-03-28 14:36:57 +01:00
}
2016-04-15 10:59:41 +00:00
func ( m * Model ) requestGlobal ( deviceID protocol . DeviceID , folder , name string , offset int64 , size int , hash [ ] byte , fromTemporary bool ) ( [ ] byte , error ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
nc , ok := m . conn [ deviceID ]
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-01-06 11:11:18 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return nil , fmt . Errorf ( "requestGlobal: no such device: %s" , deviceID )
2014-01-06 11:11:18 +01:00
}
2016-07-23 12:46:31 +00:00
l . Debugf ( "%v REQ(out): %s: %q / %q o=%d s=%d h=%x ft=%t" , m , deviceID , folder , name , offset , size , hash , fromTemporary )
2014-01-06 11:11:18 +01:00
2016-04-15 10:59:41 +00:00
return nc . Request ( folder , name , offset , size , hash , fromTemporary )
2014-01-06 11:11:18 +01:00
}
// ScanFolders scans all configured folders concurrently and returns a map
// from folder ID to the scan error, for those folders that failed.
func (m *Model) ScanFolders() map[string]error {
	// Snapshot the folder list under the lock; the scans themselves run
	// without holding fmut.
	m.fmut.RLock()
	folders := make([]string, 0, len(m.folderCfgs))
	for folder := range m.folderCfgs {
		folders = append(folders, folder)
	}
	m.fmut.RUnlock()

	// errors is written from multiple goroutines, hence the mutex.
	errors := make(map[string]error, len(m.folderCfgs))
	errorsMut := sync.NewMutex()

	wg := sync.NewWaitGroup()
	wg.Add(len(folders))
	for _, folder := range folders {
		folder := folder // capture per-iteration value for the goroutine
		go func() {
			err := m.ScanFolder(folder)
			if err != nil {
				errorsMut.Lock()
				errors[folder] = err
				errorsMut.Unlock()

				// Potentially sets the error twice, once in the scanner just
				// by doing a check, and once here, if the error returned is
				// the same one as returned by CheckHealth, though
				// duplicate set is handled by setError.
				m.fmut.RLock()
				srv := m.folderRunners[folder]
				m.fmut.RUnlock()
				srv.setError(err)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	return errors
}
2013-12-15 11:43:31 +01:00
2014-09-28 12:00:38 +01:00
func ( m * Model ) ScanFolder ( folder string ) error {
2016-06-29 06:37:34 +00:00
return m . ScanFolderSubdirs ( folder , nil )
2014-08-11 20:20:01 +02:00
}
2016-06-29 06:37:34 +00:00
func ( m * Model ) ScanFolderSubdirs ( folder string , subs [ ] string ) error {
2017-12-15 20:01:56 +00:00
m . fmut . RLock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
m . fmut . RUnlock ( )
return err
2015-06-20 19:26:25 +02:00
}
2017-12-15 20:01:56 +00:00
runner := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
2015-06-20 19:26:25 +02:00
return runner . Scan ( subs )
}
2017-04-26 00:15:23 +00:00
func ( m * Model ) internalScanFolderSubdirs ( ctx context . Context , folder string , subDirs [ ] string ) error {
2017-12-15 20:01:56 +00:00
m . fmut . RLock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
m . fmut . RUnlock ( )
return err
}
fset := m . folderFiles [ folder ]
folderCfg := m . folderCfgs [ folder ]
ignores := m . folderIgnores [ folder ]
runner := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
mtimefs := fset . MtimeFS ( )
2016-12-21 10:33:07 +00:00
for i := 0 ; i < len ( subDirs ) ; i ++ {
sub := osutil . NativeFilename ( subDirs [ i ] )
if sub == "" {
// A blank subdirs means to scan the entire folder. We can trim
// the subDirs list and go on our way.
subDirs = nil
break
}
2016-12-01 12:35:11 +00:00
// We test each path by joining with "root". What we join with is
2016-12-16 11:21:22 +00:00
// not relevant, we just want the dotdot escape detection here. For
// historical reasons we may get paths that end in a slash. We
// remove that first to allow the rootedJoinedPath to pass.
2017-08-19 14:36:56 +00:00
sub = strings . TrimRight ( sub , string ( fs . PathSeparator ) )
2016-06-29 06:37:34 +00:00
subDirs [ i ] = sub
2014-08-11 20:20:01 +02:00
}
2016-08-05 07:13:52 +00:00
// Check if the ignore patterns changed as part of scanning this folder.
// If they did we should schedule a pull of the folder so that we
// request things we might have suddenly become unignored and so on.
oldHash := ignores . Hash ( )
defer func ( ) {
if ignores . Hash ( ) != oldHash {
l . Debugln ( "Folder" , folder , "ignore patterns changed; triggering puller" )
2017-10-20 14:52:55 +00:00
runner . IgnoresUpdated ( )
2016-08-05 07:13:52 +00:00
}
} ( )
2017-10-24 07:58:55 +00:00
if err := runner . CheckHealth ( ) ; err != nil {
2015-07-16 12:52:36 +02:00
return err
}
2017-08-19 14:36:56 +00:00
if err := ignores . Load ( ".stignore" ) ; err != nil && ! fs . IsNotExist ( err ) {
2015-09-29 18:01:19 +02:00
err = fmt . Errorf ( "loading ignores: %v" , err )
runner . setError ( err )
return err
}
2014-09-04 22:29:53 +02:00
2016-03-18 08:28:44 +00:00
// Clean the list of subitems to ensure that we start at a known
// directory, and don't scan subdirectories of things we've already
// scanned.
2016-06-29 06:37:34 +00:00
subDirs = unifySubs ( subDirs , func ( f string ) bool {
2017-08-19 14:36:56 +00:00
_ , ok := fset . Get ( protocol . LocalDeviceID , f )
2016-03-18 08:28:44 +00:00
return ok
} )
2015-03-08 17:33:41 +00:00
2016-05-09 18:25:39 +00:00
runner . setState ( FolderScanning )
2018-02-14 08:59:46 +01:00
fchan := scanner . Walk ( ctx , scanner . Config {
2015-08-26 23:49:06 +01:00
Folder : folderCfg . ID ,
2016-06-29 06:37:34 +00:00
Subs : subDirs ,
2015-08-26 23:49:06 +01:00
Matcher : ignores ,
BlockSize : protocol . BlockSize ,
TempLifetime : time . Duration ( m . cfg . Options ( ) . KeepTemporariesH ) * time . Hour ,
2018-02-14 08:59:46 +01:00
CurrentFiler : cFiler { m , folder } ,
2017-04-01 09:04:11 +00:00
Filesystem : mtimefs ,
2015-08-26 23:49:06 +01:00
IgnorePerms : folderCfg . IgnorePerms ,
AutoNormalize : folderCfg . AutoNormalize ,
Hashers : m . numHashers ( folder ) ,
ShortID : m . shortID ,
ProgressTickIntervalS : folderCfg . ScanProgressIntervalS ,
2017-02-06 10:27:11 +00:00
UseWeakHashes : weakhash . Enabled ,
2016-05-09 18:25:39 +00:00
} )
2014-07-15 14:27:46 +02:00
2018-01-24 00:05:47 +00:00
if err := runner . CheckHealth ( ) ; err != nil {
2014-05-04 18:20:25 +02:00
return err
}
2015-04-13 05:12:01 +09:00
2017-04-22 14:23:33 +00:00
batch := make ( [ ] protocol . FileInfo , 0 , maxBatchSizeFiles )
batchSizeBytes := 0
2017-11-17 12:11:45 +00:00
changes := 0
// Schedule a pull after scanning, but only if we actually detected any
// changes.
defer func ( ) {
if changes > 0 {
runner . SchedulePull ( )
}
} ( )
2015-04-17 15:19:40 +09:00
2018-02-14 08:59:46 +01:00
for f := range fchan {
if len ( batch ) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
if err := runner . CheckHealth ( ) ; err != nil {
2017-10-24 07:58:55 +00:00
l . Debugln ( "Stopping scan of folder %s due to: %s" , folderCfg . Description ( ) , err )
2015-03-28 14:25:42 +00:00
return err
}
2018-02-14 08:59:46 +01:00
m . updateLocalsFromScanning ( folder , batch )
batch = batch [ : 0 ]
batchSizeBytes = 0
2018-02-10 16:56:53 +01:00
}
2014-07-15 14:27:46 +02:00
2018-02-14 08:59:46 +01:00
batch = append ( batch , f )
batchSizeBytes += f . ProtoSize ( )
2018-02-10 16:56:53 +01:00
changes ++
2016-03-18 12:16:33 +00:00
}
2014-09-04 22:29:53 +02:00
2018-02-14 08:59:46 +01:00
if err := runner . CheckHealth ( ) ; err != nil {
l . Debugln ( "Stopping scan of folder %s due to: %s" , folderCfg . Description ( ) , err )
return err
} else if len ( batch ) > 0 {
m . updateLocalsFromScanning ( folder , batch )
}
2018-02-10 16:56:53 +01:00
2018-02-14 08:59:46 +01:00
if len ( subDirs ) == 0 {
// If we have no specific subdirectories to traverse, set it to one
// empty prefix so we traverse the entire folder contents once.
subDirs = [ ] string { "" }
}
// Do a scan of the database for each prefix, to check for deleted and
// ignored files.
batch = batch [ : 0 ]
batchSizeBytes = 0
for _ , sub := range subDirs {
var iterError error
fset . WithPrefixedHaveTruncated ( protocol . LocalDeviceID , sub , func ( fi db . FileIntf ) bool {
f := fi . ( db . FileInfoTruncated )
if len ( batch ) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
if err := runner . CheckHealth ( ) ; err != nil {
iterError = err
return false
}
m . updateLocalsFromScanning ( folder , batch )
batch = batch [ : 0 ]
batchSizeBytes = 0
}
switch {
case ! f . IsInvalid ( ) && ignores . Match ( f . Name ) . IsIgnored ( ) :
// File was valid at last pass but has been ignored. Set invalid bit.
l . Debugln ( "setting invalid bit on ignored" , f )
nf := f . ConvertToInvalidFileInfo ( m . id . Short ( ) )
batch = append ( batch , nf )
batchSizeBytes += nf . ProtoSize ( )
changes ++
2018-02-25 09:27:54 +01:00
case f . IsInvalid ( ) && ! ignores . Match ( f . Name ) . IsIgnored ( ) :
// Successfully scanned items are already un-ignored during
// the scan, so check whether it is deleted.
fallthrough
2018-02-14 08:59:46 +01:00
case ! f . IsInvalid ( ) && ! f . IsDeleted ( ) :
// The file is valid and not deleted. Lets check if it's
// still here.
2018-02-25 09:27:54 +01:00
// Simply stating it wont do as there are tons of corner
// cases (e.g. parent dir->simlink, missing permissions)
if ! osutil . IsDeleted ( mtimefs , f . Name ) {
return true
}
nf := protocol . FileInfo {
Name : f . Name ,
Type : f . Type ,
Size : 0 ,
ModifiedS : f . ModifiedS ,
ModifiedNs : f . ModifiedNs ,
ModifiedBy : m . id . Short ( ) ,
Deleted : true ,
Version : f . Version . Update ( m . shortID ) ,
2018-02-14 08:59:46 +01:00
}
2018-02-25 13:03:55 +01:00
// We do not want to override the global version
// with the deleted file. Keeping only our local
// counter makes sure we are in conflict with any
// other existing versions, which will be resolved
// by the normal pulling mechanisms.
if f . IsInvalid ( ) {
nf . Version . DropOthers ( m . shortID )
}
2018-02-25 09:27:54 +01:00
batch = append ( batch , nf )
batchSizeBytes += nf . ProtoSize ( )
changes ++
2018-02-14 08:59:46 +01:00
}
return true
} )
if iterError != nil {
l . Debugln ( "Stopping scan of folder %s due to: %s" , folderCfg . Description ( ) , iterError )
return iterError
}
2015-05-27 22:46:10 +01:00
}
2017-10-24 07:58:55 +00:00
if err := runner . CheckHealth ( ) ; err != nil {
l . Debugln ( "Stopping scan of folder %s due to: %s" , folderCfg . Description ( ) , err )
2015-05-27 22:46:10 +01:00
return err
} else if len ( batch ) > 0 {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-07-15 14:27:46 +02:00
}
2016-06-02 19:26:52 +00:00
m . folderStatRef ( folder ) . ScanCompleted ( )
2015-04-13 05:12:01 +09:00
runner . setState ( FolderIdle )
2014-05-04 18:20:25 +02:00
return nil
2014-03-29 18:53:48 +01:00
}
2015-05-01 14:30:17 +02:00
func ( m * Model ) DelayScan ( folder string , next time . Duration ) {
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
if ! ok {
return
}
runner . DelayScan ( next )
}
2015-04-29 20:46:32 +02:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
func ( m * Model ) numHashers ( folder string ) int {
m . fmut . Lock ( )
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
m . fmut . Unlock ( )
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 10:05:06 +02:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, lets try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 20:46:32 +02:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
2015-11-17 12:08:53 +01:00
// generateClusterConfig returns a ClusterConfigMessage that is correct for
// the given peer device
2016-07-04 10:40:29 +00:00
func ( m * Model ) generateClusterConfig ( device protocol . DeviceID ) protocol . ClusterConfig {
var message protocol . ClusterConfig
2014-04-13 15:28:26 +02:00
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2016-11-17 07:45:45 +01:00
// The list of folders in the message is sorted, so we always get the
// same order.
folders := m . deviceFolders [ device ]
sort . Strings ( folders )
for _ , folder := range folders {
2015-09-27 12:11:34 +01:00
folderCfg := m . cfg . Folders ( ) [ folder ]
2016-07-23 12:46:31 +00:00
fs := m . folderFiles [ folder ]
2016-03-11 09:48:46 +00:00
protocolFolder := protocol . Folder {
2016-07-04 10:40:29 +00:00
ID : folder ,
Label : folderCfg . Label ,
2016-12-16 22:23:35 +00:00
ReadOnly : folderCfg . Type == config . FolderTypeSendOnly ,
2016-07-04 10:40:29 +00:00
IgnorePermissions : folderCfg . IgnorePerms ,
IgnoreDelete : folderCfg . IgnoreDelete ,
DisableTempIndexes : folderCfg . DisableTempIndexes ,
2016-12-21 18:41:25 +00:00
Paused : folderCfg . Paused ,
2015-09-27 12:11:34 +01:00
}
2016-07-04 10:40:29 +00:00
2016-11-17 07:45:45 +01:00
// Devices are sorted, so we always get the same order.
for _ , device := range m . folderDevices . sortedDevices ( folder ) {
2015-09-27 11:39:02 +01:00
deviceCfg := m . cfg . Devices ( ) [ device ]
2016-07-23 12:46:31 +00:00
var indexID protocol . IndexID
2016-07-29 19:54:24 +00:00
var maxSequence int64
2016-07-23 12:46:31 +00:00
if device == m . id {
indexID = fs . IndexID ( protocol . LocalDeviceID )
2016-07-29 19:54:24 +00:00
maxSequence = fs . Sequence ( protocol . LocalDeviceID )
2016-07-23 12:46:31 +00:00
} else {
indexID = fs . IndexID ( device )
2016-07-29 19:54:24 +00:00
maxSequence = fs . Sequence ( device )
2016-07-23 12:46:31 +00:00
}
2016-03-11 09:48:46 +00:00
protocolDevice := protocol . Device {
2016-10-29 21:56:24 +00:00
ID : device ,
2016-07-29 19:54:24 +00:00
Name : deviceCfg . Name ,
Addresses : deviceCfg . Addresses ,
Compression : deviceCfg . Compression ,
CertName : deviceCfg . CertName ,
Introducer : deviceCfg . Introducer ,
IndexID : indexID ,
MaxSequence : maxSequence ,
2014-09-23 16:04:20 +02:00
}
2015-09-27 11:39:02 +01:00
2016-03-11 09:48:46 +00:00
protocolFolder . Devices = append ( protocolFolder . Devices , protocolDevice )
2014-01-09 13:58:35 +01:00
}
2016-03-11 09:48:46 +00:00
message . Folders = append ( message . Folders , protocolFolder )
2013-12-29 20:33:57 -05:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-13 15:28:26 +02:00
2016-03-11 09:48:46 +00:00
return message
2013-12-29 20:33:57 -05:00
}
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
func ( m * Model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 21:14:19 +01:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-13 05:12:01 +09:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 21:14:19 +01:00
}
2015-04-13 05:12:01 +09:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 09:58:17 +02:00
}
2014-06-16 10:47:02 +02:00
2018-01-14 17:01:06 +00:00
func ( m * Model ) PullErrors ( folder string ) ( [ ] FileError , error ) {
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
return nil , err
}
return m . folderRunners [ folder ] . PullErrors ( ) , nil
}
2018-02-04 22:46:24 +01:00
func ( m * Model ) WatchError ( folder string ) error {
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
if err := m . checkFolderRunningLocked ( folder ) ; err != nil {
return err
}
return m . folderRunners [ folder ] . WatchError ( )
}
// Override forces the local version of every file in the given folder to
// become the winning version: files we need (i.e. differ from the global
// state) are either marked deleted (if we don't have them) or have their
// version vector merged and bumped (if we do), and the results are
// committed as local changes.
func (m *Model) Override(folder string) {
	m.fmut.RLock()
	fs, ok := m.folderFiles[folder]
	runner := m.folderRunners[folder]
	m.fmut.RUnlock()
	if !ok {
		return
	}

	runner.setState(FolderScanning)
	batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
	batchSizeBytes := 0
	// Walk everything the global state says we need and rewrite it in our
	// favor, committing in size-limited batches.
	fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		need := fi.(protocol.FileInfo)
		if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
			m.updateLocalsFromScanning(folder, batch)
			batch = batch[:0]
			batchSizeBytes = 0
		}

		have, ok := fs.Get(protocol.LocalDeviceID, need.Name)
		// Don't override invalid (e.g. ignored) files
		if ok && have.Invalid {
			return true
		}
		if !ok || have.Name != need.Name {
			// We are missing the file
			need.Deleted = true
			need.Blocks = nil
			need.Version = need.Version.Update(m.shortID)
			need.Size = 0
		} else {
			// We have the file, replace with our version
			have.Version = have.Version.Merge(need.Version).Update(m.shortID)
			need = have
		}
		// Sequence is assigned anew by the file set on update.
		need.Sequence = 0
		batch = append(batch, need)
		batchSizeBytes += need.ProtoSize()
		return true
	})
	if len(batch) > 0 {
		m.updateLocalsFromScanning(folder, batch)
	}
	runner.setState(FolderIdle)
}
2014-06-20 00:27:54 +02:00
2016-07-29 19:54:24 +00:00
// CurrentSequence returns the change version for the given folder.
2014-09-28 12:00:38 +01:00
// This is guaranteed to increment if the contents of the local folder has
2014-09-27 14:44:15 +02:00
// changed.
2016-07-29 19:54:24 +00:00
func ( m * Model ) CurrentSequence ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
2014-10-12 10:36:04 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-09-27 14:44:15 +02:00
}
2016-07-29 19:54:24 +00:00
return fs . Sequence ( protocol . LocalDeviceID ) , true
2014-09-27 14:44:15 +02:00
}
2016-07-29 19:54:24 +00:00
// RemoteSequence returns the change version for the given folder, as
2014-09-27 14:44:15 +02:00
// sent by remote peers. This is guaranteed to increment if the contents of
2014-09-28 12:00:38 +01:00
// the remote or global folder has changed.
2016-07-29 19:54:24 +00:00
func ( m * Model ) RemoteSequence ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-07-15 17:54:00 +02:00
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-07-15 17:54:00 +02:00
if ! ok {
2014-10-24 14:54:36 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-07-15 17:54:00 +02:00
}
2015-01-18 02:12:06 +01:00
var ver int64
2016-11-07 16:40:48 +00:00
for device := range m . folderDevices [ folder ] {
ver += fs . Sequence ( device )
2014-06-20 00:27:54 +02:00
}
2015-06-24 08:52:38 +01:00
return ver , true
2014-06-20 00:27:54 +02:00
}
2014-09-27 14:44:15 +02:00
2015-02-07 10:52:42 +00:00
// GlobalDirectoryTree returns a nested map representing the global tree of
// the given folder, rooted at prefix. Directories become nested
// map[string]interface{} values; files become two-element slices of
// modification time and size. levels limits tree depth (-1 means unlimited),
// and dirsonly omits file entries entirely. Returns nil for unknown folders.
func (m *Model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		// The folder might not exist (user-supplied name).
		return nil
	}

	output := make(map[string]interface{})
	sep := string(filepath.Separator)
	prefix = osutil.NativeFilename(prefix)

	// Normalize the prefix to end in a separator so the Replace below strips
	// exactly the directory part.
	if prefix != "" && !strings.HasSuffix(prefix, sep) {
		prefix = prefix + sep
	}

	files.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
		f := fi.(db.FileInfoTruncated)

		// Skip invalid and deleted entries, and the prefix directory itself.
		if f.IsInvalid() || f.IsDeleted() || f.Name == prefix {
			return true
		}

		// Make the name relative to the prefix.
		f.Name = strings.Replace(f.Name, prefix, "", 1)

		var dir, base string
		if f.IsDirectory() && !f.IsSymlink() {
			dir = f.Name
		} else {
			dir = filepath.Dir(f.Name)
			base = filepath.Base(f.Name)
		}

		// Enforce the depth limit (checked after computing dir/base so the
		// separator count refers to the relative name).
		if levels > -1 && strings.Count(f.Name, sep) > levels {
			return true
		}

		// Walk (and create as needed) the nested maps down to this entry's
		// parent directory.
		last := output
		if dir != "." {
			for _, path := range strings.Split(dir, sep) {
				directory, ok := last[path]
				if !ok {
					newdir := make(map[string]interface{})
					last[path] = newdir
					last = newdir
				} else {
					last = directory.(map[string]interface{})
				}
			}
		}

		// File entries are [modtime, size]; skipped entirely in dirsonly mode.
		if !dirsonly && base != "" {
			last[base] = []interface{}{
				f.ModTime(), f.FileSize(),
			}
		}

		return true
	})

	return output
}
2018-01-01 14:39:23 +00:00
// GetFolderVersions lists the archived versions available under the folder's
// .stversions directory. The result maps the (normalized) original file name
// to its available versions, each with version time, modification time and
// size. Listing is best effort: unreadable entries and unparsable version
// tags are skipped rather than failing the whole call.
func (m *Model) GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error) {
	fcfg, ok := m.cfg.Folder(folder)
	if !ok {
		return nil, errFolderMissing
	}

	files := make(map[string][]versioner.FileVersion)

	filesystem := fcfg.Filesystem()
	err := filesystem.Walk(".stversions", func(path string, f fs.FileInfo, err error) error {
		// Skip root (which is ok to be a symlink)
		if path == ".stversions" {
			return nil
		}

		// On a walk error f may be nil; dereferencing it below would panic.
		// Skip such entries to keep the listing best effort.
		if err != nil {
			l.Debugln("GetFolderVersions: walk error on", path, err)
			return nil
		}

		// Ignore symlinks
		if f.IsSymlink() {
			return fs.SkipDir
		}

		// No records for directories
		if f.IsDir() {
			return nil
		}

		// Strip .stversions prefix.
		path = strings.TrimPrefix(path, ".stversions"+string(fs.PathSeparator))

		name, tag := versioner.UntagFilename(path)
		// Something invalid
		if name == "" || tag == "" {
			return nil
		}

		name = osutil.NormalizedFilename(name)

		versionTime, err := time.ParseInLocation(versioner.TimeFormat, tag, locationLocal)
		if err != nil {
			// Unparsable tag; not one of ours.
			return nil
		}

		files[name] = append(files[name], versioner.FileVersion{
			VersionTime: versionTime.Truncate(time.Second),
			ModTime:     f.ModTime().Truncate(time.Second),
			Size:        f.Size(),
		})
		return nil
	})
	if err != nil {
		return nil, err
	}

	return files, nil
}
// RestoreFolderVersions restores archived versions from .stversions back to
// their original locations. versions maps the original file name to the
// version time to restore. The returned map contains an error message per
// file that could not be restored; an empty map means everything succeeded.
func (m *Model) RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error) {
	fcfg, ok := m.cfg.Folder(folder)
	if !ok {
		return nil, errFolderMissing
	}

	filesystem := fcfg.Filesystem()
	ver := fcfg.Versioner()

	restore := make(map[string]string)
	errors := make(map[string]string)

	// Validation
	for file, version := range versions {
		file = osutil.NativeFilename(file)
		// Reconstruct the tagged name the versioner would have used for this
		// version time (truncated to whole seconds, as when archived).
		tag := version.In(locationLocal).Truncate(time.Second).Format(versioner.TimeFormat)
		versionedTaggedFilename := filepath.Join(".stversions", versioner.TagFilename(file, tag))
		// Check that the thing we've been asked to restore is actually a file
		// and that it exists.
		if info, err := filesystem.Lstat(versionedTaggedFilename); err != nil {
			errors[file] = err.Error()
			continue
		} else if !info.IsRegular() {
			errors[file] = "not a file"
			continue
		}

		// Check that the target location of where we are supposed to restore
		// either does not exist, or is actually a file.
		if info, err := filesystem.Lstat(file); err == nil && !info.IsRegular() {
			errors[file] = "cannot replace a non-file"
			continue
		} else if err != nil && !fs.IsNotExist(err) {
			errors[file] = err.Error()
			continue
		}

		restore[file] = versionedTaggedFilename
	}

	// Execution
	var err error
	for target, source := range restore {
		err = nil
		// If the target currently exists, move it out of the way first:
		// archive it via the configured versioner, or plain remove otherwise.
		if _, serr := filesystem.Lstat(target); serr == nil {
			if ver != nil {
				err = osutil.InWritableDir(ver.Archive, filesystem, target)
			} else {
				err = osutil.InWritableDir(filesystem.Remove, filesystem, target)
			}
		}

		// NOTE(review): the MkdirAll error is deliberately unchecked here; a
		// real failure would surface as a Copy error just below.
		filesystem.MkdirAll(filepath.Dir(target), 0755)
		if err == nil {
			err = osutil.Copy(filesystem, source, target)
		}

		if err != nil {
			errors[target] = err.Error()
			continue
		}
	}

	// Trigger scan
	if !fcfg.FSWatcherEnabled {
		m.ScanFolder(folder)
	}

	return errors, nil
}
2016-04-15 10:59:41 +00:00
// Availability returns the devices a block of the given file can be requested
// from: connected devices announcing the file in their index (excluding
// devices that have remotely paused this folder), plus devices whose reported
// download progress shows they hold the block in a temporary file.
func (m *Model) Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []Availability {
	// The slightly unusual locking sequence here is because we need to hold
	// pmut for the duration (as the value returned from foldersFiles can
	// get heavily modified on Close()), but also must acquire fmut before
	// pmut. (The locks can be *released* in any order.)
	m.fmut.RLock()
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	fs, ok := m.folderFiles[folder]
	devices := m.folderDevices[folder]
	m.fmut.RUnlock()

	if !ok {
		return nil
	}

	var availabilities []Availability
next:
	for _, device := range fs.Availability(file) {
		// Skip devices that have paused this folder on their side.
		for _, pausedFolder := range m.remotePausedFolders[device] {
			if pausedFolder == folder {
				continue next
			}
		}
		// Only currently connected devices are usable sources.
		_, ok := m.conn[device]
		if ok {
			availabilities = append(availabilities, Availability{ID: device, FromTemporary: false})
		}
	}

	// Devices reporting download progress for this exact block may serve it
	// from their temporary file even before announcing the full file.
	for device := range devices {
		if m.deviceDownloads[device].Has(folder, file, version, int32(block.Offset/protocol.BlockSize)) {
			availabilities = append(availabilities, Availability{ID: device, FromTemporary: true})
		}
	}

	return availabilities
}
2015-04-28 22:32:10 +02:00
// BringToFront bumps the given files priority in the job queue.
2014-12-30 09:35:21 +01:00
func ( m * Model ) BringToFront ( folder , file string ) {
2014-12-01 19:23:06 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
runner , ok := m . folderRunners [ folder ]
if ok {
2014-12-30 09:35:21 +01:00
runner . BringToFront ( file )
2014-12-01 19:23:06 +00:00
}
}
2015-06-21 09:35:41 +02:00
// ResetFolder drops all database state for the given folder.
func (m *Model) ResetFolder(folder string) {
	l.Infof("Cleaning data for folder %q", folder)
	db.DropFolder(m.db, folder)
}
2014-09-27 14:44:15 +02:00
// String returns a short identifier for the model, used in debug logging.
func (m *Model) String() string {
	return fmt.Sprintf("model@%p", m)
}
2014-10-13 14:43:01 +02:00
2015-06-03 09:47:39 +02:00
// VerifyConfiguration is part of the config committer interface. The model
// imposes no constraints of its own on configuration changes, so it always
// returns nil.
func (m *Model) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}
// CommitConfiguration applies the difference between the from and to
// configurations: adds, removes, restarts and pause/resume-logs folders, and
// pauses/resumes devices. It returns false when an option change requires a
// full process restart, true otherwise.
func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
	// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.

	// Go through the folder configs and figure out if we need to restart or not.
	fromFolders := mapFolders(from.Folders)
	toFolders := mapFolders(to.Folders)
	for folderID, cfg := range toFolders {
		if _, ok := fromFolders[folderID]; !ok {
			// A folder was added.
			if cfg.Paused {
				// Paused folders only get their root created; they are not
				// started.
				l.Infoln("Paused folder", cfg.Description())
				cfg.CreateRoot()
			} else {
				l.Infoln("Adding folder", cfg.Description())
				m.AddFolder(cfg)
				m.StartFolder(folderID)
			}
		}
	}

	for folderID, fromCfg := range fromFolders {
		toCfg, ok := toFolders[folderID]
		if !ok {
			// The folder was removed.
			m.RemoveFolder(fromCfg)
			continue
		}

		// This folder exists on both sides. Settings might have changed.
		// Check if anything differs that requires a restart.
		if !reflect.DeepEqual(fromCfg.RequiresRestartOnly(), toCfg.RequiresRestartOnly()) {
			m.RestartFolder(toCfg)
		}

		// Emit the folder pause/resume event
		if fromCfg.Paused != toCfg.Paused {
			eventType := events.FolderResumed
			if toCfg.Paused {
				eventType = events.FolderPaused
			}
			events.Default.Log(eventType, map[string]string{"id": toCfg.ID, "label": toCfg.Label})
		}
	}

	// Removing a device. We actually don't need to do anything.
	// Because folder config has changed (since the device lists do not match)
	// Folders for that had device got "restarted", which involves killing
	// connections to all devices that we were sharing the folder with.
	// At some point model.Close() will get called for that device which will
	// clean residue device state that is not part of any folder.

	// Pausing a device, unpausing is handled by the connection service.
	fromDevices := from.DeviceMap()
	toDevices := to.DeviceMap()
	for deviceID, toCfg := range toDevices {
		fromCfg, ok := fromDevices[deviceID]
		if !ok || fromCfg.Paused == toCfg.Paused {
			// New device or unchanged pause state; nothing to do here.
			continue
		}
		if toCfg.Paused {
			l.Infoln("Pausing", deviceID)
			m.close(deviceID)
			events.Default.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
		} else {
			events.Default.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
		}
	}

	// Some options don't require restart as those components handle it fine
	// by themselves. Compare the options structs containing only the
	// attributes that require restart and act appropriately.
	if !reflect.DeepEqual(from.Options.RequiresRestartOnly(), to.Options.RequiresRestartOnly()) {
		l.Debugln(m, "requires restart, options differ")
		return false
	}

	return true
}
2017-12-15 20:01:56 +00:00
// checkFolderRunningLocked returns nil if the folder is up and running and a
// descriptive error if not.
// Need to hold (read) lock on m.fmut when calling this.
func ( m * Model ) checkFolderRunningLocked ( folder string ) error {
_ , ok := m . folderRunners [ folder ]
if ok {
return nil
}
if cfg , ok := m . cfg . Folder ( folder ) ; ! ok {
return errFolderMissing
} else if cfg . Paused {
2018-01-14 17:01:06 +00:00
return ErrFolderPaused
2017-12-15 20:01:56 +00:00
}
return errFolderNotRunning
}
// checkDeviceFolderConnectedLocked first checks the folder and then whether
// the given device is connected and shares this folder.
// Need to hold (read) lock on both m.fmut and m.pmut when calling this.
func (m *Model) checkDeviceFolderConnectedLocked(device protocol.DeviceID, folder string) error {
	if err := m.checkFolderRunningLocked(folder); err != nil {
		return err
	}

	cfg, ok := m.cfg.Device(device)
	if !ok {
		return errDeviceUnknown
	}
	if cfg.Paused {
		return errDevicePaused
	}

	if _, connected := m.conn[device]; !connected {
		return errors.New("device is not connected")
	}

	if !m.folderDevices.has(device, folder) {
		return errors.New("folder is not shared with device")
	}

	return nil
}
2015-07-22 09:02:55 +02:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
	byID := make(map[string]config.FolderConfiguration, len(folders))
	for _, folderCfg := range folders {
		byID[folderCfg.ID] = folderCfg
	}
	return byID
}
// mapDevices returns a map of device ID to nothing for the given slice of
// device IDs.
func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for _, device := range devices {
		set[device] = struct{}{}
	}
	return set
}
2015-04-25 22:53:44 +01:00
// Skips `skip` elements and retrieves up to `get` elements from a given slice.
// Returns the resulting slice, plus how many elements are left to skip or
// copy to satisfy the values which were provided, given the slice is not
// big enough.
func getChunk(data []string, skip, get int) ([]string, int, int) {
	available := len(data)
	switch {
	case available <= skip:
		// Everything is skipped; report how much skipping remains.
		return []string{}, skip - available, get
	case available < skip+get:
		// The skip is satisfied but the get is only partially so.
		return data[skip:], 0, get - (available - skip)
	default:
		return data[skip : skip+get], 0, 0
	}
}
2015-07-22 09:02:55 +02:00
func closeRawConn ( conn io . Closer ) error {
if conn , ok := conn . ( * tls . Conn ) ; ok {
// If the underlying connection is a *tls.Conn, Close() does more
// than it says on the tin. Specifically, it sends a TLS alert
// message, which might block forever if the connection is dead
// and we don't have a deadline set.
conn . SetWriteDeadline ( time . Now ( ) . Add ( 250 * time . Millisecond ) )
}
return conn . Close ( )
}
2015-11-13 13:30:52 +01:00
// stringSliceWithout removes the first occurrence of s from ss, in place,
// and returns the (possibly shortened) slice.
func stringSliceWithout(ss []string, s string) []string {
	for i, v := range ss {
		if v == s {
			return append(ss[:i], ss[i+1:]...)
		}
	}
	return ss
}
2016-03-18 08:28:44 +00:00
2017-08-19 14:36:56 +00:00
func readOffsetIntoBuf ( fs fs . Filesystem , file string , offset int64 , buf [ ] byte ) error {
fd , err := fs . Open ( file )
2016-04-15 10:59:41 +00:00
if err != nil {
l . Debugln ( "readOffsetIntoBuf.Open" , file , err )
return err
}
defer fd . Close ( )
_ , err = fd . ReadAt ( buf , offset )
if err != nil {
l . Debugln ( "readOffsetIntoBuf.ReadAt" , file , err )
}
return err
}
2016-04-09 11:25:06 +00:00
// unifySubs sorts and deduplicates the given list of scan subdirectories:
// entries whose canonical path fails, entries covered by a preceding parent,
// and entries whose own parents are unknown (per exists) are dropped or
// collapsed to the nearest known ancestor. Returns nil if any entry would
// cover the whole folder.
// The exists function is expected to return true for all known paths
// (excluding "" and ".")
func unifySubs(dirs []string, exists func(dir string) bool) []string {
	if len(dirs) == 0 {
		return nil
	}
	sort.Strings(dirs)
	// After sorting, an empty/root entry sorts first; it means "everything",
	// which we signal with nil.
	if dirs[0] == "" || dirs[0] == "." || dirs[0] == string(fs.PathSeparator) {
		return nil
	}
	prev := "./" // Anything that can't be parent of a clean path
	for i := 0; i < len(dirs); {
		dir, err := fs.Canonicalize(dirs[i])
		if err != nil {
			l.Debugf("Skipping %v for scan: %s", dirs[i], err)
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		// Sorted order guarantees a parent directly precedes its children,
		// so a prefix match against prev means this entry is redundant.
		if dir == prev || strings.HasPrefix(dir, prev+string(fs.PathSeparator)) {
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		// Climb to the closest ancestor that exists, so we scan the missing
		// part of the tree rather than a nonexistent leaf.
		parent := filepath.Dir(dir)
		for parent != "." && parent != string(fs.PathSeparator) && !exists(parent) {
			dir = parent
			parent = filepath.Dir(dir)
		}
		dirs[i] = dir
		prev = dir
		i++
	}
	return dirs
}
2016-05-01 06:49:29 +00:00
// makeForgetUpdate takes an index update and constructs a download progress
// update causing to forget any progress for files which we've just been sent.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	updates := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for _, f := range files {
		// Only plain existing files carry download progress to forget.
		if f.IsSymlink() || f.IsDirectory() || f.IsDeleted() {
			continue
		}
		updates = append(updates, protocol.FileDownloadProgressUpdate{
			Name:       f.Name,
			Version:    f.Version,
			UpdateType: protocol.UpdateTypeForget,
		})
	}
	return updates
}
2016-08-05 07:13:52 +00:00
2016-11-07 16:40:48 +00:00
// folderDeviceSet is a set of (folder, deviceID) pairs
type folderDeviceSet map [ string ] map [ protocol . DeviceID ] struct { }
// set adds the (dev, folder) pair to the set
func ( s folderDeviceSet ) set ( dev protocol . DeviceID , folder string ) {
devs , ok := s [ folder ]
if ! ok {
devs = make ( map [ protocol . DeviceID ] struct { } )
s [ folder ] = devs
}
devs [ dev ] = struct { } { }
}
// has returns true if the (dev, folder) pair is in the set
func ( s folderDeviceSet ) has ( dev protocol . DeviceID , folder string ) bool {
_ , ok := s [ folder ] [ dev ]
return ok
}
// hasDevice returns true if the device is set on any folder
func ( s folderDeviceSet ) hasDevice ( dev protocol . DeviceID ) bool {
for _ , devices := range s {
if _ , ok := devices [ dev ] ; ok {
return true
}
}
return false
}
2016-11-17 07:45:45 +01:00
// sortedDevices returns the list of devices for a given folder, sorted
func ( s folderDeviceSet ) sortedDevices ( folder string ) [ ] protocol . DeviceID {
devs := make ( [ ] protocol . DeviceID , 0 , len ( s [ folder ] ) )
for dev := range s [ folder ] {
devs = append ( devs , dev )
}
sort . Sort ( protocol . DeviceIDs ( devs ) )
return devs
}