2014-11-16 21:13:20 +01:00
// Copyright (C) 2014 The Syncthing Authors.
2014-09-29 21:43:32 +02:00
//
2015-03-07 21:36:35 +01:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
2017-02-09 07:52:18 +01:00
// You can obtain one at https://mozilla.org/MPL/2.0/.
2014-06-01 22:50:14 +02:00
2014-05-15 00:26:55 -03:00
package model
2013-12-15 11:43:31 +01:00
import (
2014-09-14 23:03:53 +01:00
"bufio"
2014-09-10 08:48:15 +02:00
"crypto/tls"
2015-03-10 23:45:43 +01:00
"encoding/json"
2014-01-06 21:31:36 +01:00
"errors"
2013-12-23 12:12:44 -05:00
"fmt"
2013-12-31 21:22:49 -05:00
"io"
2014-01-05 23:54:57 +01:00
"net"
2013-12-15 11:43:31 +01:00
"os"
2014-03-28 14:36:57 +01:00
"path/filepath"
2015-06-03 09:47:39 +02:00
"reflect"
2015-04-29 20:46:32 +02:00
"runtime"
2016-03-18 08:28:44 +00:00
"sort"
2014-08-11 20:20:01 +02:00
"strings"
2013-12-15 11:43:31 +01:00
"time"
2014-06-21 09:43:12 +02:00
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/config"
2016-05-04 19:38:12 +00:00
"github.com/syncthing/syncthing/lib/connections"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
2016-08-05 17:45:45 +00:00
"github.com/syncthing/syncthing/lib/fs"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
2015-09-22 19:38:46 +02:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/sync"
2016-12-17 19:48:33 +00:00
"github.com/syncthing/syncthing/lib/upgrade"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/versioner"
2017-02-06 10:27:11 +00:00
"github.com/syncthing/syncthing/lib/weakhash"
2015-06-12 13:04:00 +02:00
"github.com/thejerf/suture"
2013-12-15 11:43:31 +01:00
)
2014-07-15 13:04:37 +02:00
// How many files to send in each Index/IndexUpdate message.
const (
	indexTargetSize = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	indexBatchSize  = 1000       // Either way, don't include more files than this
)
2014-07-15 13:04:37 +02:00
2014-09-30 17:52:05 +02:00
// service is the interface implemented by the per-folder runners (pullers
// and scanners). A runner is created when its folder is started and runs
// under the model's supervisor until the folder is stopped or removed.
type service interface {
	BringToFront(string)        // Promote the named file to the front of the pull queue
	DelayScan(d time.Duration)  // Postpone the next periodic scan
	IndexUpdated()              // Remote index was updated notification
	Jobs() ([]string, []string) // In progress, Queued
	Scan(subs []string) error   // Scan the given subdirectories (or everything, if empty)
	Serve()                     // suture.Service
	Stop()                      // suture.Service

	// Folder state accessors used by the model and API layer.
	getState() (folderState, time.Time, error)
	setState(state folderState)
	clearError()
	setError(err error)
}
2016-04-15 10:59:41 +00:00
// Availability identifies a device that can serve file data, and whether
// the data would come from a temporary (partially downloaded) file on that
// device.
type Availability struct {
	ID            protocol.DeviceID `json:"id"`
	FromTemporary bool              `json:"fromTemporary"`
}
2013-12-15 11:43:31 +01:00
// Model keeps track of the configured folders, their file sets and the
// devices they are shared with, and routes index/request traffic between
// the database and the network connections. Fields are grouped by the
// mutex that protects them: fmut guards folder-related state, pmut guards
// per-connection state. Lock order is fmut before pmut.
type Model struct {
	*suture.Supervisor // supervises folder runners and versioner services

	cfg               *config.Wrapper
	db                *db.Instance
	finder            *db.BlockFinder
	progressEmitter   *ProgressEmitter
	id                protocol.DeviceID
	shortID           protocol.ShortID
	cacheIgnoredFiles bool
	protectedFiles    []string // paths that must never be overwritten by syncing (config, keys, ...)

	deviceName    string
	clientName    string
	clientVersion string

	folderCfgs         map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles        map[string]*db.FileSet                                 // folder -> files
	folderDevices      folderDeviceSet                                        // folder -> deviceIDs
	deviceFolders      map[protocol.DeviceID][]string                         // deviceID -> folders
	deviceStatRefs     map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores      map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners      map[string]service                                     // folder -> puller or scanner
	folderRunnerTokens map[string][]suture.ServiceToken                       // folder -> tokens for puller or scanner
	folderStatRefs     map[string]*stats.FolderStatisticsReference            // folder -> statsRef
	fmut               sync.RWMutex                                           // protects the above

	conn                map[protocol.DeviceID]connections.Connection
	closed              map[protocol.DeviceID]chan struct{}
	helloMessages       map[protocol.DeviceID]protocol.HelloResult
	deviceDownloads     map[protocol.DeviceID]*deviceDownloadState
	remotePausedFolders map[protocol.DeviceID][]string // deviceID -> folders
	pmut                sync.RWMutex                   // protects the above
}
2016-08-05 17:45:45 +00:00
// folderFactory constructs the runner (puller or scanner) for a folder of
// a particular type.
type folderFactory func(*Model, config.FolderConfiguration, versioner.Versioner, *fs.MtimeFS) service
2016-05-04 10:47:33 +00:00
2014-01-07 22:44:21 +01:00
var (
2016-05-04 11:26:36 +00:00
folderFactories = make ( map [ config . FolderType ] folderFactory , 0 )
2014-01-07 22:44:21 +01:00
)
2014-01-06 21:31:36 +01:00
2016-06-26 10:07:27 +00:00
// Sentinel errors returned when a folder or device is in a state where a
// requested operation cannot be performed.
var (
	errFolderPathEmpty     = errors.New("folder path empty")
	errFolderPathMissing   = errors.New("folder path missing")
	errFolderMarkerMissing = errors.New("folder marker missing")
	errHomeDiskNoSpace     = errors.New("home disk has insufficient free space")
	errFolderNoSpace       = errors.New("folder has insufficient free space")
	errInvalidFilename     = errors.New("filename is invalid")
	errDeviceUnknown       = errors.New("unknown device")
	errDevicePaused        = errors.New("device is paused")
	errDeviceIgnored       = errors.New("device is ignored")
	errNotRelative         = errors.New("not a relative path")
	errFolderPaused        = errors.New("folder is paused")
	errFolderMissing       = errors.New("no such folder")
)
2014-01-06 11:11:18 +01:00
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
2014-09-28 12:00:38 +01:00
// for file data without altering the local folder in any way.
2015-10-31 12:31:25 +01:00
func NewModel ( cfg * config . Wrapper , id protocol . DeviceID , deviceName , clientName , clientVersion string , ldb * db . Instance , protectedFiles [ ] string ) * Model {
2013-12-15 11:43:31 +01:00
m := & Model {
2015-07-11 11:12:20 +10:00
Supervisor : suture . New ( "model" , suture . Spec {
Log : func ( line string ) {
2015-10-03 17:25:21 +02:00
l . Debugln ( line )
2015-07-11 11:12:20 +10:00
} ,
} ) ,
2016-12-21 18:41:25 +00:00
cfg : cfg ,
db : ldb ,
finder : db . NewBlockFinder ( ldb ) ,
progressEmitter : NewProgressEmitter ( cfg ) ,
id : id ,
shortID : id . Short ( ) ,
cacheIgnoredFiles : cfg . Options ( ) . CacheIgnoredFiles ,
protectedFiles : protectedFiles ,
deviceName : deviceName ,
clientName : clientName ,
clientVersion : clientVersion ,
folderCfgs : make ( map [ string ] config . FolderConfiguration ) ,
folderFiles : make ( map [ string ] * db . FileSet ) ,
folderDevices : make ( folderDeviceSet ) ,
deviceFolders : make ( map [ protocol . DeviceID ] [ ] string ) ,
deviceStatRefs : make ( map [ protocol . DeviceID ] * stats . DeviceStatisticsReference ) ,
folderIgnores : make ( map [ string ] * ignore . Matcher ) ,
folderRunners : make ( map [ string ] service ) ,
folderRunnerTokens : make ( map [ string ] [ ] suture . ServiceToken ) ,
folderStatRefs : make ( map [ string ] * stats . FolderStatisticsReference ) ,
conn : make ( map [ protocol . DeviceID ] connections . Connection ) ,
closed : make ( map [ protocol . DeviceID ] chan struct { } ) ,
helloMessages : make ( map [ protocol . DeviceID ] protocol . HelloResult ) ,
deviceDownloads : make ( map [ protocol . DeviceID ] * deviceDownloadState ) ,
remotePausedFolders : make ( map [ protocol . DeviceID ] [ ] string ) ,
fmut : sync . NewRWMutex ( ) ,
pmut : sync . NewRWMutex ( ) ,
2013-12-15 11:43:31 +01:00
}
2014-11-25 22:07:18 +00:00
if cfg . Options ( ) . ProgressUpdateIntervalS > - 1 {
go m . progressEmitter . Serve ( )
}
2016-08-07 16:21:59 +00:00
cfg . Subscribe ( m )
2013-12-15 11:43:31 +01:00
return m
}
2015-04-28 22:32:10 +02:00
// StartDeadlockDetector starts a deadlock detector on the models locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
2015-04-08 13:35:03 +01:00
func ( m * Model ) StartDeadlockDetector ( timeout time . Duration ) {
l . Infof ( "Starting deadlock detector with %v timeout" , timeout )
2016-10-30 00:14:38 +01:00
detector := newDeadlockDetector ( timeout )
detector . Watch ( "fmut" , m . fmut )
detector . Watch ( "pmut" , m . pmut )
2015-04-08 13:35:03 +01:00
}
2016-06-26 10:07:27 +00:00
// StartFolder constructs the folder service and starts it.
2016-05-04 10:47:33 +00:00
func ( m * Model ) StartFolder ( folder string ) {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
2016-12-21 18:41:25 +00:00
m . pmut . Lock ( )
2016-08-07 16:21:59 +00:00
folderType := m . startFolderLocked ( folder )
2016-12-19 10:12:06 +01:00
folderCfg := m . folderCfgs [ folder ]
2016-12-21 18:41:25 +00:00
m . pmut . Unlock ( )
2016-08-07 16:21:59 +00:00
m . fmut . Unlock ( )
2016-12-19 10:12:06 +01:00
l . Infof ( "Ready to synchronize %s (%s)" , folderCfg . Description ( ) , folderType )
2016-08-07 16:21:59 +00:00
}
func ( m * Model ) startFolderLocked ( folder string ) config . FolderType {
2014-09-28 12:00:38 +01:00
cfg , ok := m . folderCfgs [ folder ]
2014-09-27 14:44:15 +02:00
if ! ok {
2016-11-17 17:12:41 +02:00
panic ( "cannot start nonexistent folder " + cfg . Description ( ) )
2014-09-27 14:44:15 +02:00
}
2014-09-30 17:52:05 +02:00
_ , ok = m . folderRunners [ folder ]
if ok {
2016-11-17 17:12:41 +02:00
panic ( "cannot start already running folder " + cfg . Description ( ) )
2014-09-30 17:52:05 +02:00
}
2014-09-27 14:44:15 +02:00
2016-05-04 10:47:33 +00:00
folderFactory , ok := folderFactories [ cfg . Type ]
if ! ok {
2016-05-04 11:26:36 +00:00
panic ( fmt . Sprintf ( "unknown folder type 0x%x" , cfg . Type ) )
2016-05-04 10:47:33 +00:00
}
2016-06-26 10:07:27 +00:00
fs := m . folderFiles [ folder ]
2016-08-07 16:21:59 +00:00
// Find any devices for which we hold the index in the db, but the folder
// is not shared, and drop it.
expected := mapDevices ( cfg . DeviceIDs ( ) )
for _ , available := range fs . ListDevices ( ) {
if _ , ok := expected [ available ] ; ! ok {
l . Debugln ( "dropping" , folder , "state for" , available )
fs . Replace ( available , nil )
}
}
2016-12-21 18:41:25 +00:00
// Close connections to affected devices
for _ , id := range cfg . DeviceIDs ( ) {
m . closeLocked ( id )
}
2016-07-29 19:54:24 +00:00
v , ok := fs . Sequence ( protocol . LocalDeviceID ) , true
2016-06-26 10:07:27 +00:00
indexHasFiles := ok && v > 0
if ! indexHasFiles {
// It's a blank folder, so this may the first time we're looking at
// it. Attempt to create and tag with our marker as appropriate. We
// don't really do anything with errors at this point except warn -
// if these things don't work, we still want to start the folder and
// it'll show up as errored later.
if _ , err := os . Stat ( cfg . Path ( ) ) ; os . IsNotExist ( err ) {
if err := osutil . MkdirAll ( cfg . Path ( ) , 0700 ) ; err != nil {
l . Warnln ( "Creating folder:" , err )
}
}
if err := cfg . CreateMarker ( ) ; err != nil {
l . Warnln ( "Creating folder marker:" , err )
}
}
2016-05-04 10:47:33 +00:00
var ver versioner . Versioner
2014-09-27 14:44:15 +02:00
if len ( cfg . Versioning . Type ) > 0 {
2016-05-04 10:47:33 +00:00
versionerFactory , ok := versioner . Factories [ cfg . Versioning . Type ]
2014-09-27 14:44:15 +02:00
if ! ok {
l . Fatalf ( "Requested versioning type %q that does not exist" , cfg . Versioning . Type )
}
2015-06-20 20:04:47 +02:00
2016-05-04 10:47:33 +00:00
ver = versionerFactory ( folder , cfg . Path ( ) , cfg . Versioning . Params )
if service , ok := ver . ( suture . Service ) ; ok {
2015-06-12 13:04:00 +02:00
// The versioner implements the suture.Service interface, so
// expects to be run in the background in addition to being called
// when files are going to be archived.
2015-11-13 13:30:52 +01:00
token := m . Add ( service )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
2015-06-12 13:04:00 +02:00
}
2014-03-29 18:53:48 +01:00
}
2014-09-27 14:44:15 +02:00
2016-08-05 17:45:45 +00:00
p := folderFactory ( m , cfg , ver , fs . MtimeFS ( ) )
2016-05-04 10:47:33 +00:00
m . folderRunners [ folder ] = p
2015-10-18 20:13:58 -04:00
m . warnAboutOverwritingProtectedFiles ( folder )
2015-11-13 13:30:52 +01:00
token := m . Add ( p )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
2015-07-23 16:13:53 +02:00
2016-08-07 16:21:59 +00:00
return cfg . Type
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
2015-10-18 20:13:58 -04:00
func ( m * Model ) warnAboutOverwritingProtectedFiles ( folder string ) {
2016-12-16 22:23:35 +00:00
if m . folderCfgs [ folder ] . Type == config . FolderTypeSendOnly {
2015-10-18 20:13:58 -04:00
return
}
folderLocation := m . folderCfgs [ folder ] . Path ( )
ignores := m . folderIgnores [ folder ]
var filesAtRisk [ ] string
for _ , protectedFilePath := range m . protectedFiles {
// check if file is synced in this folder
if ! strings . HasPrefix ( protectedFilePath , folderLocation ) {
continue
}
// check if file is ignored
2017-03-04 07:49:48 +00:00
relPath , _ := filepath . Rel ( folderLocation , protectedFilePath )
if ignores . Match ( relPath ) . IsIgnored ( ) {
2015-10-18 20:13:58 -04:00
continue
}
filesAtRisk = append ( filesAtRisk , protectedFilePath )
}
if len ( filesAtRisk ) > 0 {
2016-10-27 17:02:19 +00:00
l . Warnln ( "Some protected files may be overwritten and cause issues. See https://docs.syncthing.net/users/config.html#syncing-configuration-files for more information. The at risk files are:" , strings . Join ( filesAtRisk , ", " ) )
2015-10-18 20:13:58 -04:00
}
}
2016-08-07 16:21:59 +00:00
// AddFolder registers the folder configuration with the model. The folder
// is not started; call StartFolder for that.
func (m *Model) AddFolder(cfg config.FolderConfiguration) {
	if len(cfg.ID) == 0 {
		panic("cannot add empty folder id")
	}

	m.fmut.Lock()
	defer m.fmut.Unlock()
	m.addFolderLocked(cfg)
}
func ( m * Model ) addFolderLocked ( cfg config . FolderConfiguration ) {
m . folderCfgs [ cfg . ID ] = cfg
m . folderFiles [ cfg . ID ] = db . NewFileSet ( cfg . ID , m . db )
2016-11-07 16:40:48 +00:00
for _ , device := range cfg . Devices {
m . folderDevices . set ( device . DeviceID , cfg . ID )
2016-08-07 16:21:59 +00:00
m . deviceFolders [ device . DeviceID ] = append ( m . deviceFolders [ device . DeviceID ] , cfg . ID )
}
ignores := ignore . New ( m . cacheIgnoredFiles )
if err := ignores . Load ( filepath . Join ( cfg . Path ( ) , ".stignore" ) ) ; err != nil && ! os . IsNotExist ( err ) {
l . Warnln ( "Loading ignores:" , err )
}
m . folderIgnores [ cfg . ID ] = ignores
}
2015-11-13 13:30:52 +01:00
func ( m * Model ) RemoveFolder ( folder string ) {
m . fmut . Lock ( )
m . pmut . Lock ( )
2017-01-07 17:05:30 +00:00
// Delete syncthing specific files
folderCfg := m . folderCfgs [ folder ]
folderPath := folderCfg . Path ( )
os . Remove ( filepath . Join ( folderPath , ".stfolder" ) )
2016-08-07 16:21:59 +00:00
m . tearDownFolderLocked ( folder )
// Remove it from the database
db . DropFolder ( m . db , folder )
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
func ( m * Model ) tearDownFolderLocked ( folder string ) {
2015-11-13 13:30:52 +01:00
// Stop the services running for this folder
for _ , id := range m . folderRunnerTokens [ folder ] {
m . Remove ( id )
}
// Close connections to affected devices
2016-11-07 16:40:48 +00:00
for dev := range m . folderDevices [ folder ] {
2015-11-13 13:30:52 +01:00
if conn , ok := m . conn [ dev ] ; ok {
closeRawConn ( conn )
}
}
// Clean up our config maps
delete ( m . folderCfgs , folder )
delete ( m . folderFiles , folder )
delete ( m . folderDevices , folder )
delete ( m . folderIgnores , folder )
delete ( m . folderRunners , folder )
delete ( m . folderRunnerTokens , folder )
delete ( m . folderStatRefs , folder )
for dev , folders := range m . deviceFolders {
m . deviceFolders [ dev ] = stringSliceWithout ( folders , folder )
}
2016-08-07 16:21:59 +00:00
}
2015-11-13 13:30:52 +01:00
2016-08-07 16:21:59 +00:00
func ( m * Model ) RestartFolder ( cfg config . FolderConfiguration ) {
if len ( cfg . ID ) == 0 {
panic ( "cannot add empty folder id" )
}
m . fmut . Lock ( )
m . pmut . Lock ( )
m . tearDownFolderLocked ( cfg . ID )
2016-12-21 18:41:25 +00:00
if ! cfg . Paused {
m . addFolderLocked ( cfg )
folderType := m . startFolderLocked ( cfg . ID )
l . Infoln ( "Restarted folder" , cfg . Description ( ) , fmt . Sprintf ( "(%s)" , folderType ) )
} else {
l . Infoln ( "Paused folder" , cfg . Description ( ) )
}
2015-11-13 13:30:52 +01:00
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
2014-01-05 23:54:57 +01:00
// ConnectionInfo describes the state of a connection to a device: transfer
// statistics plus, when connected, the remote address, advertised client
// version and connection type.
type ConnectionInfo struct {
	protocol.Statistics
	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          string
}
2015-03-10 23:45:43 +01:00
func ( info ConnectionInfo ) MarshalJSON ( ) ( [ ] byte , error ) {
return json . Marshal ( map [ string ] interface { } {
"at" : info . At ,
"inBytesTotal" : info . InBytesTotal ,
"outBytesTotal" : info . OutBytesTotal ,
2015-08-23 21:56:10 +02:00
"connected" : info . Connected ,
"paused" : info . Paused ,
2015-03-10 23:45:43 +01:00
"address" : info . Address ,
"clientVersion" : info . ClientVersion ,
2016-05-04 19:38:12 +00:00
"type" : info . Type ,
2015-03-10 23:45:43 +01:00
} )
}
2015-11-09 23:48:58 +01:00
// ConnectionStats returns a map with connection statistics for each device.
2015-04-07 13:20:40 +01:00
func ( m * Model ) ConnectionStats ( ) map [ string ] interface { } {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2016-09-14 19:38:55 +00:00
m . pmut . RLock ( )
2014-01-05 16:16:37 +01:00
2015-08-23 21:56:10 +02:00
res := make ( map [ string ] interface { } )
devs := m . cfg . Devices ( )
conns := make ( map [ string ] ConnectionInfo , len ( devs ) )
2016-12-21 18:41:25 +00:00
for device , deviceCfg := range devs {
2016-03-25 20:29:07 +00:00
hello := m . helloMessages [ device ]
versionString := hello . ClientVersion
if hello . ClientName != "syncthing" {
versionString = hello . ClientName + " " + hello . ClientVersion
}
2014-01-05 23:54:57 +01:00
ci := ConnectionInfo {
2016-05-04 19:38:12 +00:00
ClientVersion : strings . TrimSpace ( versionString ) ,
2016-12-21 18:41:25 +00:00
Paused : deviceCfg . Paused ,
2014-01-05 23:54:57 +01:00
}
2015-08-23 21:56:10 +02:00
if conn , ok := m . conn [ device ] ; ok {
2016-11-30 07:54:20 +00:00
ci . Type = conn . Type ( )
2015-08-23 21:56:10 +02:00
ci . Connected = ok
ci . Statistics = conn . Statistics ( )
if addr := conn . RemoteAddr ( ) ; addr != nil {
ci . Address = addr . String ( )
}
2014-01-05 23:54:57 +01:00
}
2014-02-13 12:41:37 +01:00
2015-04-07 13:20:40 +01:00
conns [ device . String ( ) ] = ci
2013-12-30 09:30:29 -05:00
}
2014-01-17 20:06:44 -07:00
2015-04-07 13:20:40 +01:00
res [ "connections" ] = conns
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2016-09-14 19:38:55 +00:00
m . fmut . RUnlock ( )
2014-03-28 14:36:57 +01:00
2014-05-24 21:34:11 +02:00
in , out := protocol . TotalInOut ( )
res [ "total" ] = ConnectionInfo {
Statistics : protocol . Statistics {
At : time . Now ( ) ,
2014-06-01 21:56:05 +02:00
InBytesTotal : in ,
OutBytesTotal : out ,
2014-05-24 21:34:11 +02:00
} ,
}
2014-01-05 16:16:37 +01:00
return res
2013-12-30 09:30:29 -05:00
}
2015-04-28 22:32:10 +02:00
// DeviceStatistics returns statistics about each device
2014-09-28 12:00:38 +01:00
func ( m * Model ) DeviceStatistics ( ) map [ string ] stats . DeviceStatistics {
2016-12-06 08:54:04 +00:00
res := make ( map [ string ] stats . DeviceStatistics )
2014-10-06 09:25:45 +02:00
for id := range m . cfg . Devices ( ) {
res [ id . String ( ) ] = m . deviceStatRef ( id ) . GetStatistics ( )
2014-08-21 23:45:40 +01:00
}
return res
}
2015-04-28 22:32:10 +02:00
// FolderStatistics returns statistics about each folder
2014-12-07 20:21:12 +00:00
func ( m * Model ) FolderStatistics ( ) map [ string ] stats . FolderStatistics {
2016-12-06 08:54:04 +00:00
res := make ( map [ string ] stats . FolderStatistics )
2014-12-07 20:21:12 +00:00
for id := range m . cfg . Folders ( ) {
res [ id ] = m . folderStatRef ( id ) . GetStatistics ( )
}
return res
}
2016-08-12 06:41:43 +00:00
// FolderCompletion describes how close a remote device is to being fully
// in sync with a folder: a percentage, the number of bytes still needed,
// the global folder size, and the number of deletes yet to be performed.
type FolderCompletion struct {
	CompletionPct float64
	NeedBytes     int64
	GlobalBytes   int64
	NeedDeletes   int64
}
2015-04-28 22:32:10 +02:00
// Completion returns the completion status, in percent, for the given device
// and folder.
2016-08-12 06:41:43 +00:00
func ( m * Model ) Completion ( device protocol . DeviceID , folder string ) FolderCompletion {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
rf , ok := m . folderFiles [ folder ]
2017-02-10 11:22:09 +00:00
ignores := m . folderIgnores [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-08-05 20:16:25 +02:00
if ! ok {
2016-08-12 06:41:43 +00:00
return FolderCompletion { } // Folder doesn't exist, so we hardly have any of it
2014-08-05 20:16:25 +02:00
}
2016-10-17 14:10:17 +02:00
tot := rf . GlobalSize ( ) . Bytes
2014-08-05 20:16:25 +02:00
if tot == 0 {
2016-08-12 06:41:43 +00:00
// Folder is empty, so we have all of it
return FolderCompletion {
CompletionPct : 100 ,
}
2014-08-05 20:16:25 +02:00
}
2016-05-26 06:53:27 +00:00
m . pmut . RLock ( )
counts := m . deviceDownloads [ device ] . GetBlockCounts ( folder )
m . pmut . RUnlock ( )
2016-09-02 06:45:46 +00:00
var need , fileNeed , downloaded , deletes int64
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( device , func ( f db . FileIntf ) bool {
2017-02-10 11:22:09 +00:00
if ignores . Match ( f . FileName ( ) ) . IsIgnored ( ) {
return true
}
2016-05-26 06:53:27 +00:00
ft := f . ( db . FileInfoTruncated )
2016-09-02 06:45:46 +00:00
// If the file is deleted, we account it only in the deleted column.
if ft . Deleted {
deletes ++
return true
}
2016-05-26 06:53:27 +00:00
// This might might be more than it really is, because some blocks can be of a smaller size.
downloaded = int64 ( counts [ ft . Name ] * protocol . BlockSize )
2016-09-02 06:45:46 +00:00
fileNeed = ft . FileSize ( ) - downloaded
2016-05-26 06:53:27 +00:00
if fileNeed < 0 {
fileNeed = 0
}
need += fileNeed
2014-07-29 11:06:52 +02:00
return true
} )
2015-10-21 09:10:26 +02:00
needRatio := float64 ( need ) / float64 ( tot )
completionPct := 100 * ( 1 - needRatio )
2016-09-02 06:45:46 +00:00
// If the completion is 100% but there are deletes we need to handle,
// drop it down a notch. Hack for consumers that look only at the
// percentage (our own GUI does the same calculation as here on it's own
// and needs the same fixup).
if need == 0 && deletes > 0 {
completionPct = 95 // chosen by fair dice roll
}
2015-10-21 09:10:26 +02:00
l . Debugf ( "%v Completion(%s, %q): %f (%d / %d = %f)" , m , device , folder , completionPct , need , tot , needRatio )
2014-08-12 13:53:31 +02:00
2016-08-12 06:41:43 +00:00
return FolderCompletion {
CompletionPct : completionPct ,
NeedBytes : need ,
GlobalBytes : tot ,
2016-09-02 06:45:46 +00:00
NeedDeletes : deletes ,
2016-08-12 06:41:43 +00:00
}
2014-07-29 11:06:52 +02:00
}
2016-10-17 14:10:17 +02:00
func addSizeOfFile ( s * db . Counts , f db . FileIntf ) {
switch {
case f . IsDeleted ( ) :
s . Deleted ++
case f . IsDirectory ( ) :
s . Directories ++
case f . IsSymlink ( ) :
s . Symlinks ++
default :
s . Files ++
2013-12-30 09:30:29 -05:00
}
2016-10-17 14:10:17 +02:00
s . Bytes += f . FileSize ( )
2014-01-05 16:16:37 +01:00
}
2013-12-30 09:30:29 -05:00
2014-03-28 14:36:57 +01:00
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
2016-10-17 14:10:17 +02:00
func ( m * Model ) GlobalSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-10-17 14:10:17 +02:00
return rf . GlobalSize ( )
2014-03-29 18:53:48 +01:00
}
2016-10-17 14:10:17 +02:00
return db . Counts { }
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
// LocalSize returns the number of files, deleted files and total bytes for all
2014-09-28 12:00:38 +01:00
// files in the local folder.
2016-10-17 14:10:17 +02:00
func ( m * Model ) LocalSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-10-17 14:10:17 +02:00
return rf . LocalSize ( )
2014-03-29 18:53:48 +01:00
}
2016-10-17 14:10:17 +02:00
return db . Counts { }
2014-01-06 06:38:01 +01:00
}
2014-05-19 22:31:28 +02:00
// NeedSize returns the number and total size of currently needed files.
2016-10-17 14:10:17 +02:00
func ( m * Model ) NeedSize ( folder string ) db . Counts {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2016-10-17 14:10:17 +02:00
var result db . Counts
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2016-08-05 07:13:52 +00:00
ignores := m . folderIgnores [ folder ]
cfg := m . folderCfgs [ folder ]
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
2017-01-17 07:33:48 +00:00
if shouldIgnore ( f , ignores , cfg . IgnoreDelete ) {
2016-08-05 07:13:52 +00:00
return true
}
2016-10-17 14:10:17 +02:00
addSizeOfFile ( & result , f )
2014-07-15 17:54:00 +02:00
return true
} )
}
2016-10-17 14:10:17 +02:00
result . Bytes -= m . progressEmitter . BytesCompleted ( folder )
l . Debugf ( "%v NeedSize(%q): %v" , m , folder , result )
return result
2013-12-23 12:12:44 -05:00
}
2015-04-28 22:32:10 +02:00
// NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
func (m *Model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	total := 0

	rf, ok := m.folderFiles[folder]
	if !ok {
		return nil, nil, nil, 0
	}

	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	// Pagination is expressed as a number of entries to skip and then a
	// number to collect, carried through the three lists in order:
	// in-progress first, then queued, then the remaining need list.
	skip := (page - 1) * perpage
	get := perpage

	runner, ok := m.folderRunners[folder]
	if ok {
		// The puller knows which files are currently in progress or queued;
		// resolve those names against the global index. Names that resolve
		// are remembered in `seen` so they are not repeated in `rest` below.
		allProgressNames, allQueuedNames := runner.Jobs()
		var progressNames, queuedNames []string
		progressNames, skip, get = getChunk(allProgressNames, skip, get)
		queuedNames, skip, get = getChunk(allQueuedNames, skip, get)
		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))
		for i, name := range progressNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}
		for i, name := range queuedNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}
	}

	// Fill the remainder of the page from the need list, skipping ignored
	// files and files already listed as in progress or queued. `total`
	// counts all needed files regardless of pagination.
	rest = make([]db.FileInfoTruncated, 0, perpage)
	ignores := m.folderIgnores[folder]
	cfg := m.folderCfgs[folder]
	rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
		if shouldIgnore(f, ignores, cfg.IgnoreDelete) {
			return true
		}

		total++
		if skip > 0 {
			skip--
			return true
		}
		if get > 0 {
			ft := f.(db.FileInfoTruncated)
			if _, ok := seen[ft.Name]; !ok {
				rest = append(rest, ft)
				get--
			}
		}
		return true
	})
	return progress, queued, rest, total
}
2014-09-28 12:00:38 +01:00
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs))

	// Silently drop indexes for folders we do not share with this device.
	if !m.folderSharedWith(folder, deviceID) {
		l.Debugf("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
		return
	}

	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	runner := m.folderRunners[folder]
	m.fmut.RUnlock()

	if !ok {
		// The folder was shared (checked above) but has no file set: this
		// is an internal inconsistency, not a peer error.
		l.Fatalf("Index for nonexistent folder %q", folder)
	}

	if runner != nil {
		// Runner may legitimately not be set if this is the "cleanup" Index
		// message at startup.
		defer runner.IndexUpdated()
	}

	// Forget download-progress entries for files replaced by this index.
	m.pmut.RLock()
	m.deviceDownloads[deviceID].Update(folder, makeForgetUpdate(fs))
	m.pmut.RUnlock()

	// A full index replaces everything we previously knew for this device.
	files.Replace(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.Sequence(deviceID),
	})
}
2014-09-28 12:00:38 +01:00
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) {
	l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs))

	// Silently drop updates for folders we do not share with this device.
	if !m.folderSharedWith(folder, deviceID) {
		l.Debugf("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
		return
	}

	m.fmut.RLock()
	files := m.folderFiles[folder]
	runner, ok := m.folderRunners[folder]
	m.fmut.RUnlock()

	if !ok {
		// The folder was shared (checked above) but has no runner: this is
		// an internal inconsistency, not a peer error.
		l.Fatalf("IndexUpdate for nonexistent folder %q", folder)
	}

	// Forget download-progress entries for files changed by this update.
	m.pmut.RLock()
	m.deviceDownloads[deviceID].Update(folder, makeForgetUpdate(fs))
	m.pmut.RUnlock()

	// Incremental update: merge into what we already know for this device.
	files.Update(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.Sequence(deviceID),
	})

	runner.IndexUpdated()
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) folderSharedWith ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2016-12-06 08:54:04 +00:00
shared := m . folderSharedWithLocked ( folder , deviceID )
m . fmut . RUnlock ( )
return shared
2016-01-01 20:11:12 +01:00
}
2016-08-07 16:21:59 +00:00
func ( m * Model ) folderSharedWithLocked ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:00:38 +01:00
for _ , nfolder := range m . deviceFolders [ deviceID ] {
if nfolder == folder {
2014-06-06 21:48:29 +02:00
return true
}
}
return false
}
2016-07-04 10:40:29 +00:00
// ClusterConfig handles an incoming cluster configuration message from a
// connected device: it validates the announced folders, decides whether
// delta index updates are possible per folder, starts index senders, and
// processes introductions. Implements the protocol.Model interface.
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfig) {
	// Check the peer device's announced folders against our own. Emits events
	// for folders that we don't expect (unknown or not shared).
	// Also, collect a list of folders we do share, and if he's interested in
	// temporary indexes, subscribe the connection.
	tempIndexFolders := make([]string, 0, len(cm.Folders))

	m.pmut.RLock()
	conn, ok := m.conn[deviceID]
	hello := m.helloMessages[deviceID]
	m.pmut.RUnlock()
	if !ok {
		panic("bug: ClusterConfig called on closed or nonexistent connection")
	}

	dbLocation := filepath.Dir(m.db.Location())

	// See issue #3802 - in short, we can't send modern symlink entries to older
	// clients.
	dropSymlinks := false
	if hello.ClientName == m.clientName && upgrade.CompareVersions(hello.ClientVersion, "v0.14.14") < 0 {
		l.Warnln("Not sending symlinks to old client", deviceID, "- please upgrade to v0.14.14 or newer")
		dropSymlinks = true
	}

	// fmut is held across the whole folder loop and the introduction
	// handling below; it is released just before the config save.
	m.fmut.Lock()
	var paused []string
	for _, folder := range cm.Folders {
		if folder.Paused {
			// The peer has paused this folder; remember that (stored under
			// pmut after the loop) and don't set up index sending.
			paused = append(paused, folder.ID)
			continue
		}

		if cfg, ok := m.cfg.Folder(folder.ID); ok && cfg.Paused {
			// The folder is paused on our side; skip it.
			continue
		}

		if !m.folderSharedWithLocked(folder.ID, deviceID) {
			events.Default.Log(events.FolderRejected, map[string]string{
				"folder":      folder.ID,
				"folderLabel": folder.Label,
				"device":      deviceID.String(),
			})
			l.Infof("Unexpected folder %s sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder.Description(), deviceID)
			continue
		}

		if !folder.DisableTempIndexes {
			tempIndexFolders = append(tempIndexFolders, folder.ID)
		}

		fs := m.folderFiles[folder.ID]
		myIndexID := fs.IndexID(protocol.LocalDeviceID)
		mySequence := fs.Sequence(protocol.LocalDeviceID)
		var startSequence int64

		for _, dev := range folder.Devices {
			if dev.ID == m.id {
				// This is the other side's description of what it knows
				// about us. Lets check to see if we can start sending index
				// updates directly or need to send the index from start...

				if dev.IndexID == myIndexID {
					// They say they've seen our index ID before, so we can
					// send a delta update only.

					if dev.MaxSequence > mySequence {
						// Safety check. They claim to have more or newer
						// index data than we have - either we have lost
						// index data, or reset the index without resetting
						// the IndexID, or something else weird has
						// happened. We send a full index to reset the
						// situation.
						l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", deviceID, folder.Description())
						startSequence = 0
						continue
					}

					l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", deviceID, folder.Description(), dev.MaxSequence)
					startSequence = dev.MaxSequence
				} else if dev.IndexID != 0 {
					// They say they've seen an index ID from us, but it's
					// not the right one. Either they are confused or we
					// must have reset our database since last talking to
					// them. We'll start with a full index transfer.
					l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", deviceID, folder.Description(), dev.IndexID, myIndexID)
					startSequence = 0
				}
			} else if dev.ID == deviceID && dev.IndexID != 0 {
				// This is the other side's description of themselves. We
				// check to see that it matches the IndexID we have on file,
				// otherwise we drop our old index data and expect to get a
				// completely new set.

				theirIndexID := fs.IndexID(deviceID)
				if dev.IndexID == 0 {
					// NOTE(review): this branch is unreachable — the
					// enclosing condition already requires dev.IndexID != 0,
					// so the "no index ID announced" reset below never runs.
					// Either the outer condition should drop the IndexID
					// check or this branch should be removed; confirm intent.
					// They're not announcing an index ID. This means they
					// do not support delta indexes and we should clear any
					// information we have from them before accepting their
					// index, which will presumably be a full index.
					fs.Replace(deviceID, nil)
				} else if dev.IndexID != theirIndexID {
					// The index ID we have on file is not what they're
					// announcing. They must have reset their database and
					// will probably send us a full index. We drop any
					// information we have and remember this new index ID
					// instead.
					l.Infof("Device %v folder %s has a new index ID (%v)", deviceID, folder.Description(), dev.IndexID)
					fs.Replace(deviceID, nil)
					fs.SetIndexID(deviceID, dev.IndexID)
				} else {
					// They're sending a recognized index ID and will most
					// likely use delta indexes. We might already have files
					// that we need to pull so let the folder runner know
					// that it should recheck the index data.
					if runner := m.folderRunners[folder.ID]; runner != nil {
						defer runner.IndexUpdated()
					}
				}
			}
		}

		go sendIndexes(conn, folder.ID, fs, m.folderIgnores[folder.ID], startSequence, dbLocation, dropSymlinks)
	}

	// Record which folders the peer has paused, keyed by device.
	m.pmut.Lock()
	m.remotePausedFolders[deviceID] = paused
	m.pmut.Unlock()

	// This breaks if we send multiple CM messages during the same connection.
	if len(tempIndexFolders) > 0 {
		m.pmut.RLock()
		conn, ok := m.conn[deviceID]
		m.pmut.RUnlock()
		// In case we've got ClusterConfig, and the connection disappeared
		// from infront of our nose.
		if ok {
			m.progressEmitter.temporaryIndexSubscribe(conn, tempIndexFolders)
		}
	}

	var changed = false
	if deviceCfg := m.cfg.Devices()[deviceID]; deviceCfg.Introducer {
		foldersDevices, introduced := m.handleIntroductions(deviceCfg, cm)
		if introduced {
			changed = true
		}
		// If permitted, check if the introducer has unshare devices/folders with
		// some of the devices/folders that we know were introduced to us by him.
		if !deviceCfg.SkipIntroductionRemovals && m.handleDeintroductions(deviceCfg, cm, foldersDevices) {
			changed = true
		}
	}
	m.fmut.Unlock()

	// Persist any config changes made by the introduction handling.
	if changed {
		if err := m.cfg.Save(); err != nil {
			l.Warnln("Failed to save config", err)
		}
	}
}
2014-09-23 16:04:20 +02:00
2016-11-07 16:40:48 +00:00
// handleIntroductions handles adding devices/shares that are shared by an
// introducer device. It returns the set of (folder, device) pairs the
// introducer announced and whether the config was changed. Called with fmut
// held (see ClusterConfig); the caller is responsible for saving the config.
func (m *Model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig) (folderDeviceSet, bool) {
	// This device is an introducer. Go through the announced lists of folders
	// and devices and add what we are missing, remove what we have extra that
	// has been introduced by the introducer.
	changed := false

	foldersDevices := make(folderDeviceSet)

	for _, folder := range cm.Folders {
		// We don't have this folder, skip.
		if _, ok := m.folderDevices[folder.ID]; !ok {
			continue
		}

		// Adds devices which we do not have, but the introducer has
		// for the folders that we have in common. Also, shares folders
		// with devices that we have in common, yet are currently not sharing
		// the folder.
	nextDevice:
		for _, device := range folder.Devices {
			// Record every announced (device, folder) pair; used later by
			// de-introduction handling.
			foldersDevices.set(device.ID, folder.ID)

			if _, ok := m.cfg.Devices()[device.ID]; !ok {
				// The device is currently unknown. Add it to the config.
				m.introduceDevice(device, introducerCfg)
				changed = true
			}

			for _, er := range m.deviceFolders[device.ID] {
				if er == folder.ID {
					// We already share the folder with this device, so
					// nothing to do.
					continue nextDevice
				}
			}

			// We don't yet share this folder with this device. Add the device
			// to sharing list of the folder.
			m.introduceDeviceToFolder(device, folder, introducerCfg)
			changed = true
		}
	}

	return foldersDevices, changed
}
2016-11-07 16:40:48 +00:00
2016-11-07 16:40:48 +00:00
// handleDeintroductions handles removals of devices/shares that are removed
// by an introducer device. foldersDevices is the set of pairs the introducer
// currently announces (from handleIntroductions). Returns whether the config
// was changed; the caller is responsible for saving it.
func (m *Model) handleDeintroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig, foldersDevices folderDeviceSet) bool {
	changed := false
	foldersIntroducedByOthers := make(folderDeviceSet)

	// Check if we should unshare some folders, if the introducer has unshared them.
	for _, folderCfg := range m.cfg.Folders() {
		folderChanged := false
		for i := 0; i < len(folderCfg.Devices); i++ {
			if folderCfg.Devices[i].IntroducedBy == introducerCfg.DeviceID {
				if !foldersDevices.has(folderCfg.Devices[i].DeviceID, folderCfg.ID) {
					// We could not find that folder shared on the
					// introducer with the device that was introduced to us.
					// We should follow and unshare as well.
					l.Infof("Unsharing folder %s with %v as introducer %v no longer shares the folder with that device", folderCfg.Description(), folderCfg.Devices[i].DeviceID, folderCfg.Devices[i].IntroducedBy)
					folderCfg.Devices = append(folderCfg.Devices[:i], folderCfg.Devices[i+1:]...)
					// Compensate for the in-place removal so the element
					// shifted into position i is not skipped.
					i--
					folderChanged = true
				}
			} else {
				// Shared with this device by someone other than the
				// introducer (or by us); protects the device from removal
				// in the second pass below.
				foldersIntroducedByOthers.set(folderCfg.Devices[i].DeviceID, folderCfg.ID)
			}
		}

		// We've modified the folder, hence update it.
		if folderChanged {
			m.cfg.SetFolder(folderCfg)
			changed = true
		}
	}

	// Check if we should remove some devices, if the introducer no longer
	// shares any folder with them. Yet do not remove if we share other
	// folders that haven't been introduced by the introducer.
	for _, device := range m.cfg.Devices() {
		if device.IntroducedBy == introducerCfg.DeviceID {
			if !foldersDevices.hasDevice(device.DeviceID) {
				if foldersIntroducedByOthers.hasDevice(device.DeviceID) {
					l.Infof("Would have removed %v as %v no longer shares any folders, yet there are other folders that are shared with this device that haven't been introduced by this introducer.", device.DeviceID, device.IntroducedBy)
					continue
				}
				// The introducer no longer shares any folder with the
				// device, remove the device.
				l.Infof("Removing device %v as introducer %v no longer shares any folders with that device", device.DeviceID, device.IntroducedBy)
				m.cfg.RemoveDevice(device.DeviceID)
				changed = true
			}
		}
	}

	return changed
}
func ( m * Model ) introduceDevice ( device protocol . Device , introducerCfg config . DeviceConfiguration ) {
addresses := [ ] string { "dynamic" }
for _ , addr := range device . Addresses {
if addr != "dynamic" {
addresses = append ( addresses , addr )
}
}
l . Infof ( "Adding device %v to config (vouched for by introducer %v)" , device . ID , introducerCfg . DeviceID )
newDeviceCfg := config . DeviceConfiguration {
DeviceID : device . ID ,
Name : device . Name ,
Compression : introducerCfg . Compression ,
Addresses : addresses ,
CertName : device . CertName ,
IntroducedBy : introducerCfg . DeviceID ,
}
// The introducers' introducers are also our introducers.
if device . Introducer {
l . Infof ( "Device %v is now also an introducer" , device . ID )
newDeviceCfg . Introducer = true
newDeviceCfg . SkipIntroductionRemovals = device . SkipIntroductionRemovals
2014-09-23 16:04:20 +02:00
}
2016-11-07 16:40:48 +00:00
m . cfg . SetDevice ( newDeviceCfg )
}
func ( m * Model ) introduceDeviceToFolder ( device protocol . Device , folder protocol . Folder , introducerCfg config . DeviceConfiguration ) {
2016-11-22 08:36:14 +01:00
l . Infof ( "Sharing folder %s with %v (vouched for by introducer %v)" , folder . Description ( ) , device . ID , introducerCfg . DeviceID )
2016-11-07 16:40:48 +00:00
m . deviceFolders [ device . ID ] = append ( m . deviceFolders [ device . ID ] , folder . ID )
m . folderDevices . set ( device . ID , folder . ID )
folderCfg := m . cfg . Folders ( ) [ folder . ID ]
folderCfg . Devices = append ( folderCfg . Devices , config . FolderDeviceConfiguration {
DeviceID : device . ID ,
IntroducedBy : introducerCfg . DeviceID ,
} )
m . cfg . SetFolder ( folderCfg )
2014-04-13 15:28:26 +02:00
}
2016-08-10 09:37:32 +00:00
// Closed is called when a connection has been closed. It tears down all
// per-device state held under pmut, emits a DeviceDisconnected event, and
// signals anyone waiting on the connection's closed channel.
func (m *Model) Closed(conn protocol.Connection, err error) {
	device := conn.ID()

	m.pmut.Lock()
	conn, ok := m.conn[device]
	if ok {
		m.progressEmitter.temporaryIndexUnsubscribe(conn)
	}
	delete(m.conn, device)
	delete(m.helloMessages, device)
	delete(m.deviceDownloads, device)
	delete(m.remotePausedFolders, device)
	closed := m.closed[device]
	delete(m.closed, device)
	m.pmut.Unlock()

	l.Infof("Connection to %s closed: %v", device, err)
	events.Default.Log(events.DeviceDisconnected, map[string]string{
		"id":    device.String(),
		"error": err.Error(),
	})
	// Signal waiters (e.g. AddConnection replacing an old connection) that
	// cleanup is complete.
	// NOTE(review): if no entry existed in m.closed, closed is a nil channel
	// and close(nil) panics — presumably Closed is only ever invoked for
	// connections registered via AddConnection; verify.
	close(closed)
}
2016-12-21 18:41:25 +00:00
// close will close the underlying connection for a given device, taking the
// pmut lock itself.
func (m *Model) close(device protocol.DeviceID) {
	m.pmut.Lock()
	defer m.pmut.Unlock()
	m.closeLocked(device)
}
// closeLocked will close the underlying connection for a given device. The
// pmut lock must be held by the caller. Closing an unconnected device is a
// no-op.
func (m *Model) closeLocked(device protocol.DeviceID) {
	if conn, ok := m.conn[device]; ok {
		closeRawConn(conn)
	}
}
2014-01-06 11:11:18 +01:00
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
//
// The request is rejected (ErrInvalid/ErrNoSuchFile) unless the folder is
// shared with the requester, the name stays inside the folder root, is not an
// internal or ignored file, and does not traverse a symlink.
// NOTE(review): the hash parameter is not used in this function; content
// verification is presumably done by the requester — confirm.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error {
	if offset < 0 {
		return protocol.ErrInvalid
	}

	if !m.folderSharedWith(folder, deviceID) {
		l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder)
		return protocol.ErrNoSuchFile
	}
	if deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d t=%v", m, deviceID, folder, name, offset, len(buf), fromTemporary)
	}
	m.fmut.RLock()
	folderCfg := m.folderCfgs[folder]
	folderPath := folderCfg.Path()
	folderIgnores := m.folderIgnores[folder]
	m.fmut.RUnlock()

	fn, err := rootedJoinedPath(folderPath, name)
	if err != nil {
		// Request tries to escape!
		l.Debugf("%v Invalid REQ(in) tries to escape: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
		return protocol.ErrInvalid
	}

	// Having passed the rootedJoinedPath check above, we know "name" is
	// acceptable relative to "folderPath" and in canonical form, so we can
	// trust it.

	if ignore.IsInternal(name) {
		l.Debugf("%v REQ(in) for internal file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
		return protocol.ErrNoSuchFile
	}

	if folderIgnores.Match(name).IsIgnored() {
		l.Debugf("%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
		return protocol.ErrNoSuchFile
	}

	// Refuse to serve paths whose parent directories traverse a symlink.
	if err := osutil.TraversesSymlink(folderPath, filepath.Dir(name)); err != nil {
		l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID, folder, name, offset, len(buf))
		return protocol.ErrNoSuchFile
	}

	// Only check temp files if the flag is set, and if we are set to advertise
	// the temp indexes.
	if fromTemporary && !folderCfg.DisableTempIndexes {
		tempFn := filepath.Join(folderPath, ignore.TempName(name))

		if info, err := osutil.Lstat(tempFn); err != nil || !info.Mode().IsRegular() {
			// Reject reads for anything that doesn't exist or is something
			// other than a regular file.
			return protocol.ErrNoSuchFile
		}

		if err := readOffsetIntoBuf(tempFn, offset, buf); err == nil {
			return nil
		}
		// Fall through to reading from a non-temp file, just incase the temp
		// file has finished downloading.
	}

	if info, err := osutil.Lstat(fn); err != nil || !info.Mode().IsRegular() {
		// Reject reads for anything that doesn't exist or is something
		// other than a regular file.
		return protocol.ErrNoSuchFile
	}

	err = readOffsetIntoBuf(fn, offset, buf)
	if os.IsNotExist(err) {
		return protocol.ErrNoSuchFile
	} else if err != nil {
		return protocol.ErrGeneric
	}
	return nil
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentFolderFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
2016-12-06 08:54:04 +00:00
return fs . Get ( protocol . LocalDeviceID , file )
2014-04-01 23:18:32 +02:00
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentGlobalFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
2016-12-06 08:54:04 +00:00
return fs . GetGlobal ( file )
2014-04-01 23:18:32 +02:00
}
2014-03-29 18:53:48 +01:00
// cFiler looks up the current local file metadata for a single folder via
// the model.
type cFiler struct {
	m *Model // model to query
	r string // folder ID to query within
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	return cf.m.CurrentFolderFile(cf.r, file)
}
2014-09-28 12:00:38 +01:00
// ConnectedTo returns true if we are connected to the named device.
func ( m * Model ) ConnectedTo ( deviceID protocol . DeviceID ) bool {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
_ , ok := m . conn [ deviceID ]
2014-09-20 19:14:45 +02:00
m . pmut . RUnlock ( )
2014-09-10 11:29:01 +02:00
if ok {
2014-09-28 12:00:38 +01:00
m . deviceWasSeen ( deviceID )
2014-09-10 11:29:01 +02:00
}
2014-01-06 11:11:18 +01:00
return ok
}
2014-11-08 22:12:18 +01:00
// GetIgnores returns the raw lines of the folder's .stignore file, the
// expanded ignore patterns currently in effect, and any error encountered.
// A missing .stignore file is not an error; empty lines and nil patterns
// are returned.
func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
	var lines []string

	m.fmut.RLock()
	cfg, ok := m.folderCfgs[folder]
	m.fmut.RUnlock()
	if !ok {
		return lines, nil, fmt.Errorf("Folder %s does not exist", folder)
	}

	if !cfg.HasMarker() {
		// Folder marker is gone; the folder is stopped and its path may be
		// unavailable.
		return lines, nil, fmt.Errorf("Folder %s stopped", folder)
	}

	fd, err := os.Open(filepath.Join(cfg.Path(), ".stignore"))
	if err != nil {
		if os.IsNotExist(err) {
			// No ignore file; that's fine, there are just no lines.
			return lines, nil, nil
		}
		l.Warnln("Loading .stignore:", err)
		return lines, nil, err
	}
	defer fd.Close()

	scanner := bufio.NewScanner(fd)
	for scanner.Scan() {
		lines = append(lines, strings.TrimSpace(scanner.Text()))
	}

	m.fmut.RLock()
	patterns := m.folderIgnores[folder].Patterns()
	m.fmut.RUnlock()

	return lines, patterns, nil
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) SetIgnores ( folder string , content [ ] string ) error {
cfg , ok := m . folderCfgs [ folder ]
2014-09-14 23:03:53 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return fmt . Errorf ( "Folder %s does not exist" , folder )
2014-09-14 23:03:53 +01:00
}
2015-08-30 12:59:01 +01:00
path := filepath . Join ( cfg . Path ( ) , ".stignore" )
2016-11-23 14:06:08 +00:00
fd , err := osutil . CreateAtomic ( path )
2014-09-14 23:03:53 +01:00
if err != nil {
l . Warnln ( "Saving .stignore:" , err )
return err
}
for _ , line := range content {
2015-07-12 01:03:40 +10:00
fmt . Fprintln ( fd , line )
2014-09-14 23:03:53 +01:00
}
2015-07-12 01:03:40 +10:00
if err := fd . Close ( ) ; err != nil {
2014-09-14 23:03:53 +01:00
l . Warnln ( "Saving .stignore:" , err )
return err
}
2015-08-30 12:59:01 +01:00
osutil . HideFile ( path )
2014-09-14 23:03:53 +01:00
2014-09-28 12:00:38 +01:00
return m . ScanFolder ( folder )
2014-09-14 23:03:53 +01:00
}
2016-03-25 20:29:07 +00:00
// OnHello is called when an device connects to us.
// This allows us to extract some information from the Hello message
// and add it to a list of known devices ahead of any checks.
2016-08-05 09:29:49 +00:00
func ( m * Model ) OnHello ( remoteID protocol . DeviceID , addr net . Addr , hello protocol . HelloResult ) error {
if m . cfg . IgnoredDevice ( remoteID ) {
return errDeviceIgnored
}
2016-12-21 18:41:25 +00:00
if cfg , ok := m . cfg . Device ( remoteID ) ; ok {
2016-08-05 09:29:49 +00:00
// The device exists
2016-12-21 18:41:25 +00:00
if cfg . Paused {
return errDevicePaused
}
2016-08-05 09:29:49 +00:00
return nil
2016-03-25 20:29:07 +00:00
}
2016-08-05 09:29:49 +00:00
events . Default . Log ( events . DeviceRejected , map [ string ] string {
"name" : hello . DeviceName ,
"device" : remoteID . String ( ) ,
"address" : addr . String ( ) ,
} )
return errDeviceUnknown
2016-03-25 20:29:07 +00:00
}
// GetHello is called when we are about to connect to some remote device.
2016-07-04 10:40:29 +00:00
func ( m * Model ) GetHello ( protocol . DeviceID ) protocol . HelloIntf {
return & protocol . Hello {
2016-03-25 20:29:07 +00:00
DeviceName : m . deviceName ,
ClientName : m . clientName ,
ClientVersion : m . clientVersion ,
}
}
2014-01-06 11:11:18 +01:00
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloResult) {
	deviceID := conn.ID()

	m.pmut.Lock()
	if oldConn, ok := m.conn[deviceID]; ok {
		l.Infoln("Replacing old connection", oldConn, "with", conn, "for", deviceID)
		// There is an existing connection to this device that we are
		// replacing. We must close the existing connection and wait for the
		// close to complete before adding the new connection. We do the
		// actual close without holding pmut as the connection will call
		// back into Closed() for the cleanup.
		closed := m.closed[deviceID]
		m.pmut.Unlock()
		closeRawConn(oldConn)
		<-closed
		m.pmut.Lock()
	}

	// Register all per-device state under pmut.
	m.conn[deviceID] = conn
	m.closed[deviceID] = make(chan struct{})
	m.deviceDownloads[deviceID] = newDeviceDownloadState()

	m.helloMessages[deviceID] = hello

	event := map[string]string{
		"id":            deviceID.String(),
		"deviceName":    hello.DeviceName,
		"clientName":    hello.ClientName,
		"clientVersion": hello.ClientVersion,
		"type":          conn.Type(),
	}

	addr := conn.RemoteAddr()
	if addr != nil {
		event["addr"] = addr.String()
	}

	events.Default.Log(events.DeviceConnected, event)

	l.Infof(`Device %s client is "%s %s" named "%s"`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName)

	conn.Start()
	m.pmut.Unlock()

	// Acquires fmut, so has to be done outside of pmut.
	cm := m.generateClusterConfig(deviceID)
	conn.ClusterConfig(cm)

	// Adopt the remote's self-reported name if we have none, or if
	// configured to always overwrite.
	device, ok := m.cfg.Devices()[deviceID]
	if ok && (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) {
		device.Name = hello.DeviceName
		m.cfg.SetDevice(device)
		m.cfg.Save()
	}

	m.deviceWasSeen(deviceID)
}
2016-07-04 10:40:29 +00:00
// DownloadProgress is called when a connected device reports progress on
// files it is downloading from us. It records the per-device download state
// and emits a RemoteDownloadProgress event. Implements the protocol.Model
// interface.
func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, updates []protocol.FileDownloadProgressUpdate) {
	if !m.folderSharedWith(folder, device) {
		return
	}

	m.fmut.RLock()
	cfg, ok := m.folderCfgs[folder]
	m.fmut.RUnlock()

	// Progress is irrelevant for unknown folders, send-only folders, or
	// folders where temp indexes are disabled.
	if !ok || cfg.Type == config.FolderTypeSendOnly || cfg.DisableTempIndexes {
		return
	}

	m.pmut.RLock()
	m.deviceDownloads[device].Update(folder, updates)
	state := m.deviceDownloads[device].GetBlockCounts(folder)
	m.pmut.RUnlock()

	events.Default.Log(events.RemoteDownloadProgress, map[string]interface{}{
		"device": device.String(),
		"folder": folder,
		"state":  state,
	})
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) deviceStatRef ( deviceID protocol . DeviceID ) * stats . DeviceStatisticsReference {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-09-20 19:14:45 +02:00
2014-09-28 12:00:38 +01:00
if sr , ok := m . deviceStatRefs [ deviceID ] ; ok {
2014-09-20 19:14:45 +02:00
return sr
}
2014-12-08 16:36:15 +01:00
2015-09-04 13:22:59 +02:00
sr := stats . NewDeviceStatisticsReference ( m . db , deviceID . String ( ) )
2014-12-08 16:36:15 +01:00
m . deviceStatRefs [ deviceID ] = sr
return sr
2014-09-20 19:14:45 +02:00
}
2014-09-28 12:00:38 +01:00
// deviceWasSeen records the current time as the device's last-seen time in
// the device statistics.
func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
	m.deviceStatRef(deviceID).WasSeen()
}
2014-12-07 20:21:12 +00:00
func ( m * Model ) folderStatRef ( folder string ) * stats . FolderStatisticsReference {
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-12-16 23:33:28 +01:00
sr , ok := m . folderStatRefs [ folder ]
if ! ok {
2014-12-07 20:21:12 +00:00
sr = stats . NewFolderStatisticsReference ( m . db , folder )
m . folderStatRefs [ folder ] = sr
}
2014-12-16 23:33:28 +01:00
return sr
2014-12-07 20:21:12 +00:00
}
2015-06-16 12:12:34 +01:00
func ( m * Model ) receivedFile ( folder string , file protocol . FileInfo ) {
2015-09-04 13:22:59 +02:00
m . folderStatRef ( folder ) . ReceivedFile ( file . Name , file . IsDeleted ( ) )
2014-12-07 20:21:12 +00:00
}
2016-12-17 19:48:33 +00:00
func sendIndexes ( conn protocol . Connection , folder string , fs * db . FileSet , ignores * ignore . Matcher , startSequence int64 , dbLocation string , dropSymlinks bool ) {
2014-09-28 12:00:38 +01:00
deviceID := conn . ID ( )
2014-07-15 13:04:37 +02:00
name := conn . Name ( )
2014-07-30 20:08:04 +02:00
var err error
2014-07-15 13:04:37 +02:00
2016-07-29 19:54:24 +00:00
l . Debugf ( "sendIndexes for %s-%s/%q starting (slv=%d)" , deviceID , name , folder , startSequence )
2015-10-03 17:25:21 +02:00
defer l . Debugf ( "sendIndexes for %s-%s/%q exiting: %v" , deviceID , name , folder , err )
2014-05-04 17:18:58 +02:00
2016-12-17 19:48:33 +00:00
minSequence , err := sendIndexTo ( startSequence , conn , folder , fs , ignores , dbLocation , dropSymlinks )
2014-07-30 20:08:04 +02:00
2016-01-11 16:49:44 +01:00
// Subscribe to LocalIndexUpdated (we have new information to send) and
// DeviceDisconnected (it might be us who disconnected, so we should
// exit).
sub := events . Default . Subscribe ( events . LocalIndexUpdated | events . DeviceDisconnected )
2015-07-28 21:22:44 +04:00
defer events . Default . Unsubscribe ( sub )
2014-07-15 13:04:37 +02:00
for err == nil {
2016-01-11 16:49:44 +01:00
if conn . Closed ( ) {
// Our work is done.
return
}
2016-07-29 19:54:24 +00:00
// While we have sent a sequence at least equal to the one
2015-07-28 21:22:44 +04:00
// currently in the database, wait for the local index to update. The
// local index may update for other folders than the one we are
// sending for.
2016-07-29 19:54:24 +00:00
if fs . Sequence ( protocol . LocalDeviceID ) <= minSequence {
2015-07-28 21:22:44 +04:00
sub . Poll ( time . Minute )
2014-07-30 20:08:04 +02:00
continue
2014-07-15 13:04:37 +02:00
}
2016-12-17 19:48:33 +00:00
minSequence , err = sendIndexTo ( minSequence , conn , folder , fs , ignores , dbLocation , dropSymlinks )
2015-07-28 21:22:44 +04:00
// Wait a short amount of time before entering the next loop. If there
2015-11-11 21:20:34 -05:00
// are continuous changes happening to the local index, this gives us
2015-07-28 21:22:44 +04:00
// time to batch them up a little.
time . Sleep ( 250 * time . Millisecond )
2014-07-30 20:08:04 +02:00
}
}
2014-07-15 13:04:37 +02:00
2016-12-17 19:48:33 +00:00
func sendIndexTo ( minSequence int64 , conn protocol . Connection , folder string , fs * db . FileSet , ignores * ignore . Matcher , dbLocation string , dropSymlinks bool ) ( int64 , error ) {
2014-09-28 12:00:38 +01:00
deviceID := conn . ID ( )
2014-07-30 20:08:04 +02:00
name := conn . Name ( )
batch := make ( [ ] protocol . FileInfo , 0 , indexBatchSize )
2014-08-11 20:54:59 +02:00
currentBatchSize := 0
2016-07-29 19:54:24 +00:00
initial := minSequence == 0
maxSequence := minSequence
2014-07-30 20:08:04 +02:00
var err error
2014-07-15 13:04:37 +02:00
2016-07-27 21:38:43 +00:00
sorter := NewIndexSorter ( dbLocation )
2016-07-21 17:21:15 +00:00
defer sorter . Close ( )
2015-01-12 14:50:30 +01:00
fs . WithHave ( protocol . LocalDeviceID , func ( fi db . FileIntf ) bool {
2014-08-12 13:53:31 +02:00
f := fi . ( protocol . FileInfo )
2016-07-29 19:54:24 +00:00
if f . Sequence <= minSequence {
2014-07-30 20:08:04 +02:00
return true
}
2014-07-15 13:04:37 +02:00
2016-07-29 19:54:24 +00:00
if f . Sequence > maxSequence {
maxSequence = f . Sequence
2014-07-30 20:08:04 +02:00
}
2014-07-15 13:04:37 +02:00
2016-12-17 19:48:33 +00:00
if dropSymlinks && f . IsSymlink ( ) {
// Do not send index entries with symlinks to clients that can't
// handle it. Fixes issue #3802. Once both sides are upgraded, a
// rescan (i.e., change) of the symlink is required for it to
// sync again, due to delta indexes.
return true
}
2016-07-21 17:21:15 +00:00
sorter . Append ( f )
return true
} )
sorter . Sorted ( func ( f protocol . FileInfo ) bool {
2014-08-11 20:54:59 +02:00
if len ( batch ) == indexBatchSize || currentBatchSize > indexTargetSize {
2014-07-30 20:08:04 +02:00
if initial {
2016-07-04 10:40:29 +00:00
if err = conn . Index ( folder , batch ) ; err != nil {
2014-07-30 20:08:04 +02:00
return false
}
2015-10-03 17:25:21 +02:00
l . Debugf ( "sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)" , deviceID , name , folder , len ( batch ) , currentBatchSize )
2014-07-30 20:08:04 +02:00
initial = false
} else {
2016-07-04 10:40:29 +00:00
if err = conn . IndexUpdate ( folder , batch ) ; err != nil {
2014-07-30 20:08:04 +02:00
return false
}
2015-10-03 17:25:21 +02:00
l . Debugf ( "sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)" , deviceID , name , folder , len ( batch ) , currentBatchSize )
2014-07-03 12:30:10 +02:00
}
2014-01-06 11:11:18 +01:00
2014-07-30 20:08:04 +02:00
batch = make ( [ ] protocol . FileInfo , 0 , indexBatchSize )
2014-08-11 20:54:59 +02:00
currentBatchSize = 0
2014-07-15 13:04:37 +02:00
}
2014-07-30 20:08:04 +02:00
batch = append ( batch , f )
2016-07-04 10:40:29 +00:00
currentBatchSize += f . ProtoSize ( )
2014-07-30 20:08:04 +02:00
return true
} )
if initial && err == nil {
2016-07-04 10:40:29 +00:00
err = conn . Index ( folder , batch )
2015-10-03 17:25:21 +02:00
if err == nil {
2014-09-28 12:00:38 +01:00
l . Debugf ( "sendIndexes for %s-%s/%q: %d files (small initial index)" , deviceID , name , folder , len ( batch ) )
2014-07-30 20:08:04 +02:00
}
} else if len ( batch ) > 0 && err == nil {
2016-07-04 10:40:29 +00:00
err = conn . IndexUpdate ( folder , batch )
2015-10-03 17:25:21 +02:00
if err == nil {
2014-09-28 12:00:38 +01:00
l . Debugf ( "sendIndexes for %s-%s/%q: %d files (last batch)" , deviceID , name , folder , len ( batch ) )
2014-07-30 20:08:04 +02:00
}
2014-07-15 13:04:37 +02:00
}
2014-07-30 20:08:04 +02:00
2016-07-29 19:54:24 +00:00
return maxSequence , err
2014-01-06 11:11:18 +01:00
}
2016-05-19 00:19:26 +00:00
func ( m * Model ) updateLocalsFromScanning ( folder string , fs [ ] protocol . FileInfo ) {
2016-05-19 07:01:43 +00:00
m . updateLocals ( folder , fs )
m . fmut . RLock ( )
2016-09-28 15:54:13 +00:00
folderCfg := m . folderCfgs [ folder ]
2016-05-19 07:01:43 +00:00
m . fmut . RUnlock ( )
2016-12-21 16:35:20 +00:00
m . diskChangeDetected ( folderCfg , fs , events . LocalChangeDetected )
2016-05-19 00:19:26 +00:00
}
func ( m * Model ) updateLocalsFromPulling ( folder string , fs [ ] protocol . FileInfo ) {
2016-05-19 07:01:43 +00:00
m . updateLocals ( folder , fs )
2016-12-21 16:35:20 +00:00
m . fmut . RLock ( )
folderCfg := m . folderCfgs [ folder ]
m . fmut . RUnlock ( )
m . diskChangeDetected ( folderCfg , fs , events . RemoteChangeDetected )
2016-05-19 00:19:26 +00:00
}
2016-05-19 07:01:43 +00:00
func ( m * Model ) updateLocals ( folder string , fs [ ] protocol . FileInfo ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-06-16 08:30:15 +02:00
files := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2016-01-16 21:42:32 +01:00
if files == nil {
// The folder doesn't exist.
return
}
2015-06-16 08:30:15 +02:00
files . Update ( protocol . LocalDeviceID , fs )
2015-12-04 08:41:13 +01:00
filenames := make ( [ ] string , len ( fs ) )
for i , file := range fs {
filenames [ i ] = file . Name
}
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . LocalIndexUpdated , map [ string ] interface { } {
2015-12-04 08:41:13 +01:00
"folder" : folder ,
"items" : len ( fs ) ,
"filenames" : filenames ,
2016-07-29 19:54:24 +00:00
"version" : files . Sequence ( protocol . LocalDeviceID ) ,
2014-07-17 13:38:36 +02:00
} )
2016-05-19 00:19:26 +00:00
}
2016-12-21 16:35:20 +00:00
func ( m * Model ) diskChangeDetected ( folderCfg config . FolderConfiguration , files [ ] protocol . FileInfo , typeOfEvent events . EventType ) {
2016-09-28 15:54:13 +00:00
path := strings . Replace ( folderCfg . Path ( ) , ` \\?\ ` , "" , 1 )
2016-05-19 00:19:26 +00:00
for _ , file := range files {
objType := "file"
2016-05-19 07:01:43 +00:00
action := "modified"
2016-05-19 00:19:26 +00:00
2016-07-15 14:23:20 +00:00
// If our local vector is version 1 AND it is the only version
2016-07-04 10:40:29 +00:00
// vector so far seen for this file then it is a new file. Else if
// it is > 1 it's not new, and if it is 1 but another shortId
// version vector exists then it is new for us but created elsewhere
// so the file is still not new but modified by us. Only if it is
// truly new do we change this to 'added', else we leave it as
// 'modified'.
if len ( file . Version . Counters ) == 1 && file . Version . Counters [ 0 ] . Value == 1 {
2016-05-19 07:01:43 +00:00
action = "added"
2016-05-19 00:19:26 +00:00
}
if file . IsDirectory ( ) {
objType = "dir"
}
if file . IsDeleted ( ) {
2016-05-19 07:01:43 +00:00
action = "deleted"
2016-05-19 00:19:26 +00:00
}
2016-09-28 15:54:13 +00:00
// The full file path, adjusted to the local path separator character. Also
// for windows paths, strip unwanted chars from the front.
2016-07-15 14:23:20 +00:00
path := filepath . Join ( path , filepath . FromSlash ( file . Name ) )
2016-05-19 00:19:26 +00:00
2016-12-21 16:35:20 +00:00
// Two different events can be fired here based on what EventType is passed into function
events . Default . Log ( typeOfEvent , map [ string ] string {
"folderID" : folderCfg . ID ,
"label" : folderCfg . Label ,
"action" : action ,
"type" : objType ,
"path" : path ,
"modifiedBy" : file . ModifiedBy . String ( ) ,
2016-05-19 00:19:26 +00:00
} )
}
2014-03-28 14:36:57 +01:00
}
2016-04-15 10:59:41 +00:00
func ( m * Model ) requestGlobal ( deviceID protocol . DeviceID , folder , name string , offset int64 , size int , hash [ ] byte , fromTemporary bool ) ( [ ] byte , error ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
nc , ok := m . conn [ deviceID ]
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-01-06 11:11:18 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return nil , fmt . Errorf ( "requestGlobal: no such device: %s" , deviceID )
2014-01-06 11:11:18 +01:00
}
2016-07-23 12:46:31 +00:00
l . Debugf ( "%v REQ(out): %s: %q / %q o=%d s=%d h=%x ft=%t" , m , deviceID , folder , name , offset , size , hash , fromTemporary )
2014-01-06 11:11:18 +01:00
2016-04-15 10:59:41 +00:00
return nc . Request ( folder , name , offset , size , hash , fromTemporary )
2014-01-06 11:11:18 +01:00
}
2015-02-11 19:52:59 +01:00
func ( m * Model ) ScanFolders ( ) map [ string ] error {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-13 05:12:01 +09:00
folders := make ( [ ] string , 0 , len ( m . folderCfgs ) )
2014-09-28 12:00:38 +01:00
for folder := range m . folderCfgs {
folders = append ( folders , folder )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
errors := make ( map [ string ] error , len ( m . folderCfgs ) )
2015-04-22 23:54:31 +01:00
errorsMut := sync . NewMutex ( )
2015-02-11 19:52:59 +01:00
2015-04-22 23:54:31 +01:00
wg := sync . NewWaitGroup ( )
2014-09-28 12:00:38 +01:00
wg . Add ( len ( folders ) )
for _ , folder := range folders {
folder := folder
2014-05-13 20:42:12 -03:00
go func ( ) {
2014-09-28 12:00:38 +01:00
err := m . ScanFolder ( folder )
2014-05-28 06:55:30 +02:00
if err != nil {
2015-02-11 19:52:59 +01:00
errorsMut . Lock ( )
errors [ folder ] = err
errorsMut . Unlock ( )
2015-04-13 05:12:01 +09:00
2015-03-28 14:25:42 +00:00
// Potentially sets the error twice, once in the scanner just
// by doing a check, and once here, if the error returned is
// the same one as returned by CheckFolderHealth, though
2015-04-13 05:12:01 +09:00
// duplicate set is handled by setError.
m . fmut . RLock ( )
srv := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
srv . setError ( err )
2014-05-28 06:55:30 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Done ( )
} ( )
2014-04-14 09:58:17 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Wait ( )
2015-02-11 19:52:59 +01:00
return errors
2014-03-29 18:53:48 +01:00
}
2013-12-15 11:43:31 +01:00
2014-09-28 12:00:38 +01:00
func ( m * Model ) ScanFolder ( folder string ) error {
2016-06-29 06:37:34 +00:00
return m . ScanFolderSubdirs ( folder , nil )
2014-08-11 20:20:01 +02:00
}
2016-06-29 06:37:34 +00:00
func ( m * Model ) ScanFolderSubdirs ( folder string , subs [ ] string ) error {
2015-06-20 19:26:25 +02:00
m . fmut . Lock ( )
2017-02-09 20:29:56 +00:00
runner , okRunner := m . folderRunners [ folder ]
cfg , okCfg := m . folderCfgs [ folder ]
2015-06-20 19:26:25 +02:00
m . fmut . Unlock ( )
2017-02-09 20:29:56 +00:00
if ! okRunner {
if okCfg && cfg . Paused {
return errFolderPaused
}
return errFolderMissing
2015-06-20 19:26:25 +02:00
}
return runner . Scan ( subs )
}
2016-06-29 06:37:34 +00:00
func ( m * Model ) internalScanFolderSubdirs ( folder string , subDirs [ ] string ) error {
2016-12-21 10:33:07 +00:00
for i := 0 ; i < len ( subDirs ) ; i ++ {
sub := osutil . NativeFilename ( subDirs [ i ] )
if sub == "" {
// A blank subdirs means to scan the entire folder. We can trim
// the subDirs list and go on our way.
subDirs = nil
break
}
2016-12-01 12:35:11 +00:00
// We test each path by joining with "root". What we join with is
2016-12-16 11:21:22 +00:00
// not relevant, we just want the dotdot escape detection here. For
// historical reasons we may get paths that end in a slash. We
// remove that first to allow the rootedJoinedPath to pass.
sub = strings . TrimRight ( sub , string ( os . PathSeparator ) )
2016-12-01 12:35:11 +00:00
if _ , err := rootedJoinedPath ( "root" , sub ) ; err != nil {
2015-03-27 09:51:18 +01:00
return errors . New ( "invalid subpath" )
}
2016-06-29 06:37:34 +00:00
subDirs [ i ] = sub
2014-08-11 20:20:01 +02:00
}
2014-11-29 22:29:49 +01:00
m . fmut . Lock ( )
2015-03-16 21:14:19 +01:00
fs := m . folderFiles [ folder ]
2014-12-23 13:41:02 +01:00
folderCfg := m . folderCfgs [ folder ]
2014-12-23 10:05:08 +01:00
ignores := m . folderIgnores [ folder ]
2015-03-16 21:14:19 +01:00
runner , ok := m . folderRunners [ folder ]
2014-12-23 13:41:02 +01:00
m . fmut . Unlock ( )
2016-08-05 17:45:45 +00:00
mtimefs := fs . MtimeFS ( )
2014-12-23 13:41:02 +01:00
2016-08-05 07:13:52 +00:00
// Check if the ignore patterns changed as part of scanning this folder.
// If they did we should schedule a pull of the folder so that we
// request things we might have suddenly become unignored and so on.
oldHash := ignores . Hash ( )
defer func ( ) {
if ignores . Hash ( ) != oldHash {
l . Debugln ( "Folder" , folder , "ignore patterns changed; triggering puller" )
runner . IndexUpdated ( )
}
} ( )
2014-12-23 13:41:02 +01:00
if ! ok {
2017-02-09 20:29:56 +00:00
if folderCfg . Paused {
return errFolderPaused
}
return errFolderMissing
2014-12-23 13:41:02 +01:00
}
2015-07-16 12:52:36 +02:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2015-09-29 18:01:19 +02:00
runner . setError ( err )
2016-11-17 17:12:41 +02:00
l . Infof ( "Stopping folder %s due to error: %s" , folderCfg . Description ( ) , err )
2015-07-16 12:52:36 +02:00
return err
}
2015-09-29 18:01:19 +02:00
if err := ignores . Load ( filepath . Join ( folderCfg . Path ( ) , ".stignore" ) ) ; err != nil && ! os . IsNotExist ( err ) {
err = fmt . Errorf ( "loading ignores: %v" , err )
runner . setError ( err )
2016-11-17 17:12:41 +02:00
l . Infof ( "Stopping folder %s due to error: %s" , folderCfg . Description ( ) , err )
2015-09-29 18:01:19 +02:00
return err
}
2014-09-04 22:29:53 +02:00
2016-03-18 08:28:44 +00:00
// Clean the list of subitems to ensure that we start at a known
// directory, and don't scan subdirectories of things we've already
// scanned.
2016-06-29 06:37:34 +00:00
subDirs = unifySubs ( subDirs , func ( f string ) bool {
2016-03-18 08:28:44 +00:00
_ , ok := fs . Get ( protocol . LocalDeviceID , f )
return ok
} )
2015-03-08 17:33:41 +00:00
2015-11-13 15:00:32 +01:00
// The cancel channel is closed whenever we return (such as from an error),
// to signal the potentially still running walker to stop.
cancel := make ( chan struct { } )
defer close ( cancel )
2016-05-09 18:25:39 +00:00
runner . setState ( FolderScanning )
fchan , err := scanner . Walk ( scanner . Config {
2015-08-26 23:49:06 +01:00
Folder : folderCfg . ID ,
Dir : folderCfg . Path ( ) ,
2016-06-29 06:37:34 +00:00
Subs : subDirs ,
2015-08-26 23:49:06 +01:00
Matcher : ignores ,
BlockSize : protocol . BlockSize ,
TempLifetime : time . Duration ( m . cfg . Options ( ) . KeepTemporariesH ) * time . Hour ,
CurrentFiler : cFiler { m , folder } ,
2016-08-05 17:45:45 +00:00
Lstater : mtimefs ,
2015-08-26 23:49:06 +01:00
IgnorePerms : folderCfg . IgnorePerms ,
AutoNormalize : folderCfg . AutoNormalize ,
Hashers : m . numHashers ( folder ) ,
ShortID : m . shortID ,
ProgressTickIntervalS : folderCfg . ScanProgressIntervalS ,
2015-11-13 15:00:32 +01:00
Cancel : cancel ,
2017-02-06 10:27:11 +00:00
UseWeakHashes : weakhash . Enabled ,
2016-05-09 18:25:39 +00:00
} )
2014-07-15 14:27:46 +02:00
2014-05-04 18:20:25 +02:00
if err != nil {
2015-06-13 19:10:11 +01:00
// The error we get here is likely an OS level error, which might not be
// as readable as our health check errors. Check if we can get a health
// check error first, and use that if it's available.
if ferr := m . CheckFolderHealth ( folder ) ; ferr != nil {
err = ferr
}
2015-04-13 05:12:01 +09:00
runner . setError ( err )
2014-05-04 18:20:25 +02:00
return err
}
2015-04-13 05:12:01 +09:00
2015-04-17 15:19:40 +09:00
batchSizeFiles := 100
batchSizeBlocks := 2048 // about 256 MB
batch := make ( [ ] protocol . FileInfo , 0 , batchSizeFiles )
blocksHandled := 0
2014-07-15 14:27:46 +02:00
for f := range fchan {
2015-04-17 15:19:40 +09:00
if len ( batch ) == batchSizeFiles || blocksHandled > batchSizeBlocks {
2015-03-28 14:25:42 +00:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2016-11-17 17:12:41 +02:00
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folderCfg . Description ( ) , err )
2015-03-28 14:25:42 +00:00
return err
}
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-07-15 14:27:46 +02:00
batch = batch [ : 0 ]
2015-04-17 15:19:40 +09:00
blocksHandled = 0
2014-07-15 14:27:46 +02:00
}
batch = append ( batch , f )
2015-04-17 15:19:40 +09:00
blocksHandled += len ( f . Blocks )
2014-07-15 14:27:46 +02:00
}
2015-03-28 14:25:42 +00:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2016-11-17 17:12:41 +02:00
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folderCfg . Description ( ) , err )
2015-03-28 14:25:42 +00:00
return err
} else if len ( batch ) > 0 {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-07-15 14:27:46 +02:00
}
2016-06-29 06:37:34 +00:00
if len ( subDirs ) == 0 {
2016-03-18 12:16:33 +00:00
// If we have no specific subdirectories to traverse, set it to one
// empty prefix so we traverse the entire folder contents once.
2016-06-29 06:37:34 +00:00
subDirs = [ ] string { "" }
2016-03-18 12:16:33 +00:00
}
2014-09-04 22:29:53 +02:00
2016-09-02 13:23:24 +00:00
// Do a scan of the database for each prefix, to check for deleted and
// ignored files.
2016-03-18 12:16:33 +00:00
batch = batch [ : 0 ]
2016-06-29 06:37:34 +00:00
for _ , sub := range subDirs {
2016-03-18 12:16:33 +00:00
var iterError error
fs . WithPrefixedHaveTruncated ( protocol . LocalDeviceID , sub , func ( fi db . FileIntf ) bool {
f := fi . ( db . FileInfoTruncated )
2016-09-02 13:23:24 +00:00
if len ( batch ) == batchSizeFiles {
if err := m . CheckFolderHealth ( folder ) ; err != nil {
iterError = err
return false
}
m . updateLocalsFromScanning ( folder , batch )
batch = batch [ : 0 ]
}
switch {
2017-02-07 08:34:24 +00:00
case ! f . IsInvalid ( ) && ignores . Match ( f . Name ) . IsIgnored ( ) :
// File was valid at last pass but has been ignored. Set invalid bit.
2016-09-02 13:23:24 +00:00
l . Debugln ( "setting invalid bit on ignored" , f )
nf := protocol . FileInfo {
Name : f . Name ,
Type : f . Type ,
Size : f . Size ,
ModifiedS : f . ModifiedS ,
ModifiedNs : f . ModifiedNs ,
2016-12-21 16:35:20 +00:00
ModifiedBy : m . id . Short ( ) ,
2016-09-02 13:23:24 +00:00
Permissions : f . Permissions ,
NoPermissions : f . NoPermissions ,
Invalid : true ,
Version : f . Version , // The file is still the same, so don't bump version
2014-09-04 22:29:53 +02:00
}
2016-09-02 13:23:24 +00:00
batch = append ( batch , nf )
2016-03-18 12:16:33 +00:00
2016-09-02 13:23:24 +00:00
case ! f . IsInvalid ( ) && ! f . IsDeleted ( ) :
// The file is valid and not deleted. Lets check if it's
// still here.
2016-03-18 12:16:33 +00:00
2016-12-23 11:10:58 +01:00
if _ , err := mtimefs . Lstat ( filepath . Join ( folderCfg . Path ( ) , f . Name ) ) ; err != nil {
2016-03-18 12:16:33 +00:00
// We don't specifically verify that the error is
// os.IsNotExist because there is a corner case when a
// directory is suddenly transformed into a file. When that
// happens, files that were in the directory (that is now a
// file) are deleted but will return a confusing error ("not a
// directory") when we try to Lstat() them.
nf := protocol . FileInfo {
2016-08-06 13:05:59 +00:00
Name : f . Name ,
Type : f . Type ,
2016-09-02 06:45:46 +00:00
Size : 0 ,
2016-08-06 13:05:59 +00:00
ModifiedS : f . ModifiedS ,
ModifiedNs : f . ModifiedNs ,
2016-12-21 16:35:20 +00:00
ModifiedBy : m . id . Short ( ) ,
2016-08-06 13:05:59 +00:00
Deleted : true ,
Version : f . Version . Update ( m . shortID ) ,
2016-03-18 12:16:33 +00:00
}
2016-04-04 12:53:55 +02:00
2016-03-18 12:16:33 +00:00
batch = append ( batch , nf )
2014-08-12 13:53:31 +02:00
}
2014-07-15 14:27:46 +02:00
}
2016-03-18 12:16:33 +00:00
return true
} )
2015-05-27 22:46:10 +01:00
2016-03-18 12:16:33 +00:00
if iterError != nil {
2016-11-17 17:12:41 +02:00
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folderCfg . Description ( ) , iterError )
2016-03-18 12:16:33 +00:00
return iterError
}
2015-05-27 22:46:10 +01:00
}
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2016-11-17 17:12:41 +02:00
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folderCfg . Description ( ) , err )
2015-05-27 22:46:10 +01:00
return err
} else if len ( batch ) > 0 {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-07-15 14:27:46 +02:00
}
2016-06-02 19:26:52 +00:00
m . folderStatRef ( folder ) . ScanCompleted ( )
2015-04-13 05:12:01 +09:00
runner . setState ( FolderIdle )
2014-05-04 18:20:25 +02:00
return nil
2014-03-29 18:53:48 +01:00
}
2015-05-01 14:30:17 +02:00
// DelayScan postpones the next automatic rescan of the given folder by the
// given duration. Unknown folders are silently ignored.
func (m *Model) DelayScan(folder string, next time.Duration) {
	// Read-only map access; take the read lock, consistent with the other
	// read-side accessors.
	m.fmut.RLock()
	runner, ok := m.folderRunners[folder]
	m.fmut.RUnlock()
	if !ok {
		return
	}
	runner.DelayScan(next)
}
2015-04-29 20:46:32 +02:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
func ( m * Model ) numHashers ( folder string ) int {
m . fmut . Lock ( )
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
m . fmut . Unlock ( )
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 10:05:06 +02:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, lets try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 20:46:32 +02:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
2015-11-17 12:08:53 +01:00
// generateClusterConfig returns a ClusterConfigMessage that is correct for
// the given peer device
2016-07-04 10:40:29 +00:00
func ( m * Model ) generateClusterConfig ( device protocol . DeviceID ) protocol . ClusterConfig {
var message protocol . ClusterConfig
2014-04-13 15:28:26 +02:00
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2016-11-17 07:45:45 +01:00
// The list of folders in the message is sorted, so we always get the
// same order.
folders := m . deviceFolders [ device ]
sort . Strings ( folders )
for _ , folder := range folders {
2015-09-27 12:11:34 +01:00
folderCfg := m . cfg . Folders ( ) [ folder ]
2016-07-23 12:46:31 +00:00
fs := m . folderFiles [ folder ]
2016-03-11 09:48:46 +00:00
protocolFolder := protocol . Folder {
2016-07-04 10:40:29 +00:00
ID : folder ,
Label : folderCfg . Label ,
2016-12-16 22:23:35 +00:00
ReadOnly : folderCfg . Type == config . FolderTypeSendOnly ,
2016-07-04 10:40:29 +00:00
IgnorePermissions : folderCfg . IgnorePerms ,
IgnoreDelete : folderCfg . IgnoreDelete ,
DisableTempIndexes : folderCfg . DisableTempIndexes ,
2016-12-21 18:41:25 +00:00
Paused : folderCfg . Paused ,
2015-09-27 12:11:34 +01:00
}
2016-07-04 10:40:29 +00:00
2016-11-17 07:45:45 +01:00
// Devices are sorted, so we always get the same order.
for _ , device := range m . folderDevices . sortedDevices ( folder ) {
2015-09-27 11:39:02 +01:00
deviceCfg := m . cfg . Devices ( ) [ device ]
2016-07-23 12:46:31 +00:00
var indexID protocol . IndexID
2016-07-29 19:54:24 +00:00
var maxSequence int64
2016-07-23 12:46:31 +00:00
if device == m . id {
indexID = fs . IndexID ( protocol . LocalDeviceID )
2016-07-29 19:54:24 +00:00
maxSequence = fs . Sequence ( protocol . LocalDeviceID )
2016-07-23 12:46:31 +00:00
} else {
indexID = fs . IndexID ( device )
2016-07-29 19:54:24 +00:00
maxSequence = fs . Sequence ( device )
2016-07-23 12:46:31 +00:00
}
2016-03-11 09:48:46 +00:00
protocolDevice := protocol . Device {
2016-10-29 21:56:24 +00:00
ID : device ,
2016-07-29 19:54:24 +00:00
Name : deviceCfg . Name ,
Addresses : deviceCfg . Addresses ,
Compression : deviceCfg . Compression ,
CertName : deviceCfg . CertName ,
Introducer : deviceCfg . Introducer ,
IndexID : indexID ,
MaxSequence : maxSequence ,
2014-09-23 16:04:20 +02:00
}
2015-09-27 11:39:02 +01:00
2016-03-11 09:48:46 +00:00
protocolFolder . Devices = append ( protocolFolder . Devices , protocolDevice )
2014-01-09 13:58:35 +01:00
}
2016-03-11 09:48:46 +00:00
message . Folders = append ( message . Folders , protocolFolder )
2013-12-29 20:33:57 -05:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-13 15:28:26 +02:00
2016-03-11 09:48:46 +00:00
return message
2013-12-29 20:33:57 -05:00
}
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
func ( m * Model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 21:14:19 +01:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-13 05:12:01 +09:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 21:14:19 +01:00
}
2015-04-13 05:12:01 +09:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 09:58:17 +02:00
}
2014-06-16 10:47:02 +02:00
2014-09-28 12:00:38 +01:00
func ( m * Model ) Override ( folder string ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2015-03-16 21:14:19 +01:00
runner := m . folderRunners [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return
}
2014-06-23 11:52:13 +02:00
2015-03-16 21:14:19 +01:00
runner . setState ( FolderScanning )
2014-07-15 17:54:00 +02:00
batch := make ( [ ] protocol . FileInfo , 0 , indexBatchSize )
2015-01-12 14:50:30 +01:00
fs . WithNeed ( protocol . LocalDeviceID , func ( fi db . FileIntf ) bool {
2014-08-12 13:53:31 +02:00
need := fi . ( protocol . FileInfo )
2014-07-15 17:54:00 +02:00
if len ( batch ) == indexBatchSize {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-07-15 17:54:00 +02:00
batch = batch [ : 0 ]
}
2015-01-06 22:12:45 +01:00
have , ok := fs . Get ( protocol . LocalDeviceID , need . Name )
if ! ok || have . Name != need . Name {
2014-06-16 10:47:02 +02:00
// We are missing the file
2016-07-04 10:40:29 +00:00
need . Deleted = true
2014-07-15 17:54:00 +02:00
need . Blocks = nil
2015-04-02 10:21:11 +02:00
need . Version = need . Version . Update ( m . shortID )
2016-09-02 06:45:46 +00:00
need . Size = 0
2014-06-16 10:47:02 +02:00
} else {
// We have the file, replace with our version
2015-04-02 10:21:11 +02:00
have . Version = have . Version . Merge ( need . Version ) . Update ( m . shortID )
2014-07-15 17:54:00 +02:00
need = have
2014-06-16 10:47:02 +02:00
}
2016-07-29 19:54:24 +00:00
need . Sequence = 0
2014-07-15 17:54:00 +02:00
batch = append ( batch , need )
return true
} )
if len ( batch ) > 0 {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-06-16 10:47:02 +02:00
}
2015-03-16 21:14:19 +01:00
runner . setState ( FolderIdle )
2014-06-16 10:47:02 +02:00
}
2014-06-20 00:27:54 +02:00
2016-07-29 19:54:24 +00:00
// CurrentSequence returns the change version for the given folder.
2014-09-28 12:00:38 +01:00
// This is guaranteed to increment if the contents of the local folder has
2014-09-27 14:44:15 +02:00
// changed.
2016-07-29 19:54:24 +00:00
func ( m * Model ) CurrentSequence ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
2014-10-12 10:36:04 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-09-27 14:44:15 +02:00
}
2016-07-29 19:54:24 +00:00
return fs . Sequence ( protocol . LocalDeviceID ) , true
2014-09-27 14:44:15 +02:00
}
2016-07-29 19:54:24 +00:00
// RemoteSequence returns the change version for the given folder, as
2014-09-27 14:44:15 +02:00
// sent by remote peers. This is guaranteed to increment if the contents of
2014-09-28 12:00:38 +01:00
// the remote or global folder has changed.
2016-07-29 19:54:24 +00:00
func ( m * Model ) RemoteSequence ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-07-15 17:54:00 +02:00
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-07-15 17:54:00 +02:00
if ! ok {
2014-10-24 14:54:36 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-07-15 17:54:00 +02:00
}
2015-01-18 02:12:06 +01:00
var ver int64
2016-11-07 16:40:48 +00:00
for device := range m . folderDevices [ folder ] {
ver += fs . Sequence ( device )
2014-06-20 00:27:54 +02:00
}
2015-06-24 08:52:38 +01:00
return ver , true
2014-06-20 00:27:54 +02:00
}
2014-09-27 14:44:15 +02:00
2015-02-07 10:52:42 +00:00
// GlobalDirectoryTree returns a nested map representing the global tree
// of files below the given folder and prefix. Directories become nested
// maps; files become a [modtime, size] slice. levels limits the depth
// below the prefix (-1 means unlimited) and dirsonly omits file entries.
func (m *Model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		// Unknown folder; the name may be user input from the REST API.
		return nil
	}

	output := make(map[string]interface{})
	sep := string(filepath.Separator)
	prefix = osutil.NativeFilename(prefix)

	if prefix != "" && !strings.HasSuffix(prefix, sep) {
		prefix = prefix + sep
	}

	files.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
		f := fi.(db.FileInfoTruncated)

		// Skip invalid and deleted entries; the prefix directory itself
		// is not part of its own listing. Returning true keeps iterating.
		if f.IsInvalid() || f.IsDeleted() || f.Name == prefix {
			return true
		}

		// Make the name relative to the prefix.
		f.Name = strings.Replace(f.Name, prefix, "", 1)

		var dir, base string
		if f.IsDirectory() && !f.IsSymlink() {
			dir = f.Name
		} else {
			dir = filepath.Dir(f.Name)
			base = filepath.Base(f.Name)
		}

		// Entries deeper than the requested level are skipped.
		if levels > -1 && strings.Count(f.Name, sep) > levels {
			return true
		}

		// Walk down the nested maps to the entry's parent directory,
		// creating intermediate directory maps as needed.
		last := output
		if dir != "." {
			for _, path := range strings.Split(dir, sep) {
				directory, ok := last[path]
				if !ok {
					newdir := make(map[string]interface{})
					last[path] = newdir
					last = newdir
				} else {
					last = directory.(map[string]interface{})
				}
			}
		}

		if !dirsonly && base != "" {
			// File leaf: [modification time, size].
			last[base] = []interface{}{
				f.ModTime(), f.FileSize(),
			}
		}

		return true
	})

	return output
}
2016-04-15 10:59:41 +00:00
// Availability returns the set of devices the given file (or the given
// block of it) may be fetched from: devices that advertise the file in
// their index and are currently connected, plus devices whose reported
// download progress indicates they hold the block as a temporary file.
func (m *Model) Availability(folder, file string, version protocol.Vector, block protocol.BlockInfo) []Availability {
	// The slightly unusual locking sequence here is because we need to hold
	// pmut for the duration (as the value returned from foldersFiles can
	// get heavily modified on Close()), but also must acquire fmut before
	// pmut. (The locks can be *released* in any order.)
	m.fmut.RLock()
	m.pmut.RLock()
	defer m.pmut.RUnlock()

	fs, ok := m.folderFiles[folder]
	devices := m.folderDevices[folder]
	m.fmut.RUnlock()

	if !ok {
		// Unknown folder.
		return nil
	}

	var availabilities []Availability
next:
	for _, device := range fs.Availability(file) {
		// Skip devices that have paused this folder on their side.
		for _, pausedFolder := range m.remotePausedFolders[device] {
			if pausedFolder == folder {
				continue next
			}
		}
		// Only currently connected devices count.
		_, ok := m.conn[device]
		if ok {
			availabilities = append(availabilities, Availability{ID: device, FromTemporary: false})
		}
	}

	for device := range devices {
		// A device whose download progress covers this block may be able
		// to serve it from its temporary file.
		if m.deviceDownloads[device].Has(folder, file, version, int32(block.Offset/protocol.BlockSize)) {
			availabilities = append(availabilities, Availability{ID: device, FromTemporary: true})
		}
	}

	return availabilities
}
2015-04-28 22:32:10 +02:00
// BringToFront bumps the given files priority in the job queue.
2014-12-30 09:35:21 +01:00
func ( m * Model ) BringToFront ( folder , file string ) {
2014-12-01 19:23:06 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
runner , ok := m . folderRunners [ folder ]
if ok {
2014-12-30 09:35:21 +01:00
runner . BringToFront ( file )
2014-12-01 19:23:06 +00:00
}
}
2015-04-28 22:32:10 +02:00
// CheckFolderHealth checks the folder for common errors and returns the
// current folder error, or nil if the folder is healthy.
2015-03-28 14:25:42 +00:00
func ( m * Model ) CheckFolderHealth ( id string ) error {
2015-11-13 15:00:32 +01:00
folder , ok := m . cfg . Folders ( ) [ id ]
if ! ok {
2017-02-09 20:29:56 +00:00
return errFolderMissing
2015-11-13 15:00:32 +01:00
}
2016-06-26 10:07:27 +00:00
// Check for folder errors, with the most serious and specific first and
// generic ones like out of space on the home disk later. Note the
// inverted error flow (err==nil checks) here.
err := m . checkFolderPath ( folder )
if err == nil {
err = m . checkFolderFreeSpace ( folder )
}
if err == nil {
err = m . checkHomeDiskFree ( )
2015-07-16 12:52:36 +02:00
}
2016-06-26 10:07:27 +00:00
// Set or clear the error on the runner, which also does logging and
// generates events and stuff.
m . runnerExchangeError ( folder , err )
2015-09-06 08:29:10 +02:00
2016-06-26 10:07:27 +00:00
return err
}
2015-09-06 08:29:10 +02:00
2016-06-26 10:07:27 +00:00
// checkFolderPath returns nil if the folder path exists and has the marker file.
func ( m * Model ) checkFolderPath ( folder config . FolderConfiguration ) error {
2016-07-02 19:38:39 +00:00
if folder . Path ( ) == "" {
return errFolderPathEmpty
}
2016-06-26 10:07:27 +00:00
if fi , err := os . Stat ( folder . Path ( ) ) ; err != nil || ! fi . IsDir ( ) {
return errFolderPathMissing
}
2015-09-06 08:29:10 +02:00
2016-06-26 10:07:27 +00:00
if ! folder . HasMarker ( ) {
return errFolderMarkerMissing
}
2015-09-06 08:29:10 +02:00
2016-06-26 10:07:27 +00:00
return nil
}
// checkFolderFreeSpace returns nil if the folder has the required amount of
// free space, or if folder free space checking is disabled.
func (m *Model) checkFolderFreeSpace(folder config.FolderConfiguration) error {
	minFree := folder.MinDiskFreePct
	if minFree <= 0 {
		// Free space checking is disabled for this folder.
		return nil
	}
	// An error from the free space query is deliberately ignored; only a
	// successful measurement below the threshold counts as a failure.
	if free, err := osutil.DiskFreePercentage(folder.Path()); err == nil && free < minFree {
		return errFolderNoSpace
	}
	return nil
}
// checkHomeDiskFree returns nil if the home disk has the required amount of
// free space, or if home disk free space checking is disabled.
func ( m * Model ) checkHomeDiskFree ( ) error {
minFree := m . cfg . Options ( ) . MinHomeDiskFreePct
if minFree <= 0 {
return nil
}
free , err := osutil . DiskFreePercentage ( m . cfg . ConfigPath ( ) )
if err == nil && free < minFree {
return errHomeDiskNoSpace
2015-03-28 14:25:42 +00:00
}
2016-06-26 10:07:27 +00:00
return nil
}
// runnerExchangeError sets the given error (which may be nil) on the folder
// runner. If the error differs from any previous error, logging and events
// happen.
func (m *Model) runnerExchangeError(folder config.FolderConfiguration, err error) {
	m.fmut.RLock()
	runner, runnerExists := m.folderRunners[folder.ID]
	m.fmut.RUnlock()

	// Fetch the error currently set on the runner, if any, so we can
	// tell whether the state actually changed and log accordingly.
	var oldErr error
	if runnerExists {
		_, _, oldErr = runner.getState()
	}

	if err != nil {
		if oldErr != nil && oldErr.Error() != err.Error() {
			// Error replaced by a different error.
			l.Infof("Folder %s error changed: %q -> %q", folder.Description(), oldErr, err)
		} else if oldErr == nil {
			// Folder transitions from healthy to errored.
			l.Warnf("Stopping folder %s - %v", folder.Description(), err)
		}
		if runnerExists {
			runner.setError(err)
		}
	} else if oldErr != nil {
		// Folder transitions from errored back to healthy.
		l.Infof("Folder %q error is cleared, restarting", folder.ID)
		if runnerExists {
			runner.clearError()
		}
	}
}
2015-06-21 09:35:41 +02:00
func ( m * Model ) ResetFolder ( folder string ) {
l . Infof ( "Cleaning data for folder %q" , folder )
db . DropFolder ( m . db , folder )
2015-04-03 20:06:03 +02:00
}
2014-09-27 14:44:15 +02:00
// String implements fmt.Stringer, identifying the model by its address
// for use in log output.
func (m *Model) String() string {
	return fmt.Sprintf("model@%p", m)
}
2014-10-13 14:43:01 +02:00
2015-06-03 09:47:39 +02:00
// VerifyConfiguration accepts any proposed configuration change; the
// model never vetoes one.
func (m *Model) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}
// CommitConfiguration applies a new configuration: it adds, removes and
// restarts folders as needed, emits folder/device pause and resume
// events, and returns false when the remaining option changes require a
// full restart to take effect.
func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
	// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.

	// Go through the folder configs and figure out if we need to restart or not.
	fromFolders := mapFolders(from.Folders)
	toFolders := mapFolders(to.Folders)
	for folderID, cfg := range toFolders {
		if _, ok := fromFolders[folderID]; !ok {
			// A folder was added.
			l.Debugln(m, "adding folder", folderID)
			m.AddFolder(cfg)
			m.StartFolder(folderID)
		}
	}

	for folderID, fromCfg := range fromFolders {
		toCfg, ok := toFolders[folderID]
		if !ok {
			// The folder was removed.
			m.RemoveFolder(folderID)
			continue
		}

		// This folder exists on both sides. Settings might have changed.
		// Check if anything differs, apart from the label (which is
		// cosmetic and doesn't warrant a folder restart).
		toCfgCopy := toCfg
		fromCfgCopy := fromCfg
		fromCfgCopy.Label = ""
		toCfgCopy.Label = ""

		if !reflect.DeepEqual(fromCfgCopy, toCfgCopy) {
			m.RestartFolder(toCfg)
		}

		// Emit the folder pause/resume event
		if fromCfg.Paused != toCfg.Paused {
			eventType := events.FolderResumed
			if toCfg.Paused {
				eventType = events.FolderPaused
			}
			events.Default.Log(eventType, map[string]string{"id": toCfg.ID, "label": toCfg.Label})
		}
	}

	// Removing a device. We actually don't need to do anything.
	// Because folder config has changed (since the device lists do not match)
	// Folders for that had device got "restarted", which involves killing
	// connections to all devices that we were sharing the folder with.
	// At some point model.Close() will get called for that device which will
	// clean residue device state that is not part of any folder.

	// Pausing a device, unpausing is handled by the connection service.
	fromDevices := mapDeviceConfigs(from.Devices)
	toDevices := mapDeviceConfigs(to.Devices)
	for deviceID, toCfg := range toDevices {
		fromCfg, ok := fromDevices[deviceID]
		if !ok || fromCfg.Paused == toCfg.Paused {
			continue
		}
		if toCfg.Paused {
			l.Infoln("Pausing", deviceID)
			m.close(deviceID)
			events.Default.Log(events.DevicePaused, map[string]string{"device": deviceID.String()})
		} else {
			events.Default.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
		}
	}

	// Some options don't require restart as those components handle it fine
	// by themselves. Copy the new values onto `from` so the DeepEqual
	// below ignores exactly these fields.
	from.Options.URAccepted = to.Options.URAccepted
	from.Options.URUniqueID = to.Options.URUniqueID
	from.Options.ListenAddresses = to.Options.ListenAddresses
	from.Options.RelaysEnabled = to.Options.RelaysEnabled
	from.Options.UnackedNotificationIDs = to.Options.UnackedNotificationIDs
	from.Options.MaxRecvKbps = to.Options.MaxRecvKbps
	from.Options.MaxSendKbps = to.Options.MaxSendKbps
	from.Options.LimitBandwidthInLan = to.Options.LimitBandwidthInLan
	from.Options.StunKeepaliveS = to.Options.StunKeepaliveS
	from.Options.StunServers = to.Options.StunServers

	// All of the other generic options require restart. Or at least they may;
	// removing this check requires going through those options carefully and
	// making sure there are individual services that handle them correctly.
	// This code is the "original" requires-restart check and protects other
	// components that haven't yet been converted to VerifyConfig/CommitConfig
	// handling.
	if !reflect.DeepEqual(from.Options, to.Options) {
		l.Debugln(m, "requires restart, options differ")
		return false
	}

	return true
}
2015-07-22 09:02:55 +02:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
	res := make(map[string]config.FolderConfiguration, len(folders))
	for i := range folders {
		res[folders[i].ID] = folders[i]
	}
	return res
}
// mapDevices returns a set (map of device ID to nothing) for the given
// slice of device IDs.
func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
	res := make(map[protocol.DeviceID]struct{}, len(devices))
	for _, id := range devices {
		res[id] = struct{}{}
	}
	return res
}
2016-12-21 18:41:25 +00:00
// mapDeviceConfigs returns a map of device ID to device configuration for
// the given slice of device configurations.
func mapDeviceConfigs(devices []config.DeviceConfiguration) map[protocol.DeviceID]config.DeviceConfiguration {
	res := make(map[protocol.DeviceID]config.DeviceConfiguration, len(devices))
	for i := range devices {
		res[devices[i].DeviceID] = devices[i]
	}
	return res
}
2015-04-25 22:53:44 +01:00
// getChunk skips `skip` elements and retrieves up to `get` elements from
// the given slice. It returns the resulting slice, plus how many elements
// remain to be skipped and to be gotten, respectively, in case the slice
// was not big enough to satisfy the request.
func getChunk(data []string, skip, get int) ([]string, int, int) {
	n := len(data)
	switch {
	case n <= skip:
		// Everything is skipped; the whole request remains unsatisfied.
		return []string{}, skip - n, get
	case n < skip+get:
		// Partially satisfied; part of the get count remains.
		return data[skip:], 0, get - (n - skip)
	default:
		return data[skip : skip+get], 0, 0
	}
}
2015-07-22 09:02:55 +02:00
func closeRawConn ( conn io . Closer ) error {
if conn , ok := conn . ( * tls . Conn ) ; ok {
// If the underlying connection is a *tls.Conn, Close() does more
// than it says on the tin. Specifically, it sends a TLS alert
// message, which might block forever if the connection is dead
// and we don't have a deadline set.
conn . SetWriteDeadline ( time . Now ( ) . Add ( 250 * time . Millisecond ) )
}
return conn . Close ( )
}
2015-11-13 13:30:52 +01:00
// stringSliceWithout returns ss with the first occurrence of s removed.
// The removal is done in place, so the caller should use the returned
// slice and not rely on the original afterwards.
func stringSliceWithout(ss []string, s string) []string {
	for i, v := range ss {
		if v == s {
			return append(ss[:i], ss[i+1:]...)
		}
	}
	return ss
}
2016-03-18 08:28:44 +00:00
2016-04-15 10:59:41 +00:00
// readOffsetIntoBuf opens the named file and reads len(buf) bytes into
// buf, starting at the given offset. Failures are logged at debug level
// and returned to the caller.
func readOffsetIntoBuf(file string, offset int64, buf []byte) error {
	fd, err := os.Open(file)
	if err != nil {
		l.Debugln("readOffsetIntoBuf.Open", file, err)
		return err
	}
	defer fd.Close()

	if _, err = fd.ReadAt(buf, offset); err != nil {
		l.Debugln("readOffsetIntoBuf.ReadAt", file, err)
	}
	return err
}
2016-04-09 11:25:06 +00:00
// The exists function is expected to return true for all known paths
// (excluding "" and ".")
2016-03-18 08:28:44 +00:00
func unifySubs ( dirs [ ] string , exists func ( dir string ) bool ) [ ] string {
2016-04-09 11:25:06 +00:00
subs := trimUntilParentKnown ( dirs , exists )
sort . Strings ( subs )
return simplifySortedPaths ( subs )
}
2016-03-18 08:28:44 +00:00
2016-04-09 11:25:06 +00:00
// trimUntilParentKnown walks each path upwards until its parent is the
// folder root (".") or a path for which exists returns true, collecting
// the resulting paths. A nil return signals that the entire folder
// should be scanned instead.
func trimUntilParentKnown(dirs []string, exists func(dir string) bool) []string {
	var subs []string
	for _, sub := range dirs {
		// Internal (dot-stfolder style) paths are left untrimmed; see
		// ignore.IsInternal.
		for sub != "" && !ignore.IsInternal(sub) {
			sub = filepath.Clean(sub)
			parent := filepath.Dir(sub)
			if parent == "." || exists(parent) {
				// The parent is known; sub is as trimmed as it gets.
				break
			}
			sub = parent
			if sub == "." || sub == string(filepath.Separator) {
				// Shortcut. We are going to scan the full folder, so we can
				// just return an empty list of subs at this point.
				return nil
			}
		}
		if sub == "" {
			// An empty path also means a full scan.
			return nil
		}
		subs = append(subs, sub)
	}
	return subs
}
2016-03-18 08:28:44 +00:00
2016-04-09 11:25:06 +00:00
// simplifySortedPaths removes paths that are contained within an earlier
// path in the list. The input is expected to be sorted, so a container
// always precedes its contents.
func simplifySortedPaths(subs []string) []string {
	var cleaned []string
outer:
	for _, sub := range subs {
		for _, existing := range cleaned {
			if sub == existing || strings.HasPrefix(sub, existing+string(os.PathSeparator)) {
				// Duplicate of, or nested inside, an already-kept path.
				continue outer
			}
		}
		cleaned = append(cleaned, sub)
	}
	return cleaned
}
2016-05-01 06:49:29 +00:00
// makeForgetUpdate takes an index update and constructs a download progress
// update causing to forget any progress for files which we've just been sent.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	updates := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for i := range files {
		file := &files[i]
		// Only regular, existing files carry download progress.
		if file.IsSymlink() || file.IsDirectory() || file.IsDeleted() {
			continue
		}
		updates = append(updates, protocol.FileDownloadProgressUpdate{
			Name:       file.Name,
			Version:    file.Version,
			UpdateType: protocol.UpdateTypeForget,
		})
	}
	return updates
}
2016-08-05 07:13:52 +00:00
// shouldIgnore returns true when a file should be excluded from processing
2017-01-17 07:33:48 +00:00
func shouldIgnore ( file db . FileIntf , matcher * ignore . Matcher , ignoreDelete bool ) bool {
2016-08-05 07:13:52 +00:00
switch {
case ignoreDelete && file . IsDeleted ( ) :
// ignoreDelete first because it's a very cheap test so a win if it
// succeeds, and we might in the long run accumulate quite a few
// deleted files.
return true
2017-01-17 07:33:48 +00:00
case matcher . ShouldIgnore ( file . FileName ( ) ) :
2016-08-05 07:13:52 +00:00
return true
}
return false
}
2016-11-07 16:40:48 +00:00
// folderDeviceSet is a set of (folder, deviceID) pairs
type folderDeviceSet map[string]map[protocol.DeviceID]struct{}

// set adds the (dev, folder) pair to the set
func (s folderDeviceSet) set(dev protocol.DeviceID, folder string) {
	// Create the per-folder device set on first use.
	if _, ok := s[folder]; !ok {
		s[folder] = make(map[protocol.DeviceID]struct{})
	}
	s[folder][dev] = struct{}{}
}

// has returns true if the (dev, folder) pair is in the set
func (s folderDeviceSet) has(dev protocol.DeviceID, folder string) bool {
	_, ok := s[folder][dev]
	return ok
}

// hasDevice returns true if the device is set on any folder
func (s folderDeviceSet) hasDevice(dev protocol.DeviceID) bool {
	for _, folderDevs := range s {
		if _, ok := folderDevs[dev]; ok {
			return true
		}
	}
	return false
}
2016-11-17 07:45:45 +01:00
// sortedDevices returns the list of devices for a given folder, sorted
func (s folderDeviceSet) sortedDevices(folder string) []protocol.DeviceID {
	res := make([]protocol.DeviceID, 0, len(s[folder]))
	for dev := range s[folder] {
		res = append(res, dev)
	}
	// Map iteration order is random; sort for deterministic output.
	sort.Sort(protocol.DeviceIDs(res))
	return res
}
2016-12-01 12:35:11 +00:00
// rootedJoinedPath takes a root and a supposedly relative path inside that
// root and returns the joined path. An error is returned if the joined path
// is not in fact inside the root. Each rejection below closes a distinct
// path-traversal avenue, so the order and completeness of the checks
// matter.
func rootedJoinedPath(root, rel string) (string, error) {
	// The root must not be empty.
	if root == "" {
		return "", errInvalidFilename
	}

	pathSep := string(os.PathSeparator)

	// The expected prefix for the resulting path is the root, with a path
	// separator at the end.
	expectedPrefix := filepath.FromSlash(root)
	if !strings.HasSuffix(expectedPrefix, pathSep) {
		expectedPrefix += pathSep
	}

	// The relative path should be clean from internal dotdots and similar
	// funkyness.
	rel = filepath.FromSlash(rel)
	if filepath.Clean(rel) != rel {
		return "", errInvalidFilename
	}

	// It is not acceptable to attempt to traverse upwards or refer to the
	// root itself.
	switch rel {
	case ".", "..", pathSep:
		return "", errNotRelative
	}
	if strings.HasPrefix(rel, ".."+pathSep) {
		return "", errNotRelative
	}

	if strings.HasPrefix(rel, pathSep+pathSep) {
		// The relative path may pretend to be an absolute path within the
		// root, but the double path separator on Windows implies something
		// else. It would get cleaned by the Join below, but it's out of
		// spec anyway.
		return "", errNotRelative
	}

	// The supposedly correct path is the one filepath.Join will return, as
	// it does cleaning and so on. Check that one first to make sure no
	// obvious escape attempts have been made.
	joined := filepath.Join(root, rel)
	if !strings.HasPrefix(joined, expectedPrefix) {
		return "", errNotRelative
	}

	return joined, nil
}