2014-11-16 21:13:20 +01:00
// Copyright (C) 2014 The Syncthing Authors.
2014-09-29 21:43:32 +02:00
//
2015-03-07 21:36:35 +01:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
2014-06-01 22:50:14 +02:00
2014-05-15 00:26:55 -03:00
package model
2013-12-15 11:43:31 +01:00
import (
2014-09-14 23:03:53 +01:00
"bufio"
2014-09-10 08:48:15 +02:00
"crypto/tls"
2015-03-10 23:45:43 +01:00
"encoding/json"
2014-01-06 21:31:36 +01:00
"errors"
2013-12-23 12:12:44 -05:00
"fmt"
2013-12-31 21:22:49 -05:00
"io"
2014-01-05 23:54:57 +01:00
"net"
2013-12-15 11:43:31 +01:00
"os"
2014-03-28 14:36:57 +01:00
"path/filepath"
2015-06-03 09:47:39 +02:00
"reflect"
2015-04-29 20:46:32 +02:00
"runtime"
2014-08-11 20:20:01 +02:00
"strings"
2015-04-22 23:54:31 +01:00
stdsync "sync"
2013-12-15 11:43:31 +01:00
"time"
2014-06-21 09:43:12 +02:00
2015-01-13 13:22:56 +01:00
"github.com/syncthing/protocol"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/symlinks"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/versioner"
2014-07-06 14:46:48 +02:00
"github.com/syndtr/goleveldb/leveldb"
2015-06-12 13:04:00 +02:00
"github.com/thejerf/suture"
2013-12-15 11:43:31 +01:00
)
2014-07-15 13:04:37 +02:00
// How many files to send in each Index/IndexUpdate message.
const (
	indexTargetSize   = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	indexPerFileSize  = 250        // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
	indexPerBlockSize = 40         // Each BlockInfo is approximately this big
	indexBatchSize    = 1000       // Either way, don't include more files than this

	reqValidationTime      = time.Hour // How long to cache validation entries for Request messages
	reqValidationCacheSize = 1000      // How many entries to aim for in the validation cache size
)
2014-07-15 13:04:37 +02:00
2014-09-30 17:52:05 +02:00
// service is the interface implemented by the per-folder runners (the
// read/write puller and the read-only scanner). Serve/Stop are the
// suture.Service lifecycle; the lowercase methods manage folder state.
type service interface {
	Serve()
	Stop()
	Jobs() ([]string, []string) // In progress, Queued
	BringToFront(string)
	DelayScan(d time.Duration)
	IndexUpdated() // Remote index was updated notification
	Scan(subs []string) error

	setState(state folderState)
	setError(err error)
	clearError()
	getState() (folderState, time.Time, error)
}
2013-12-15 11:43:31 +01:00
// Model holds the local device's view of the cluster: folder configuration
// and file sets, connections to remote devices, and the per-folder runner
// services. The embedded suture.Supervisor manages the runners' lifecycles.
// Fields are grouped by the mutex that protects them (fmut, pmut, rvmut).
type Model struct {
	*suture.Supervisor

	cfg               *config.Wrapper
	db                *leveldb.DB
	finder            *db.BlockFinder
	progressEmitter   *ProgressEmitter
	id                protocol.DeviceID
	shortID           uint64
	cacheIgnoredFiles bool

	deviceName    string
	clientName    string
	clientVersion string

	folderCfgs     map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles    map[string]*db.FileSet                                 // folder -> files
	folderDevices  map[string][]protocol.DeviceID                         // folder -> deviceIDs
	deviceFolders  map[protocol.DeviceID][]string                         // deviceID -> folders
	deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores  map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners  map[string]service                                     // folder -> puller or scanner
	folderStatRefs map[string]*stats.FolderStatisticsReference            // folder -> statsRef
	fmut           sync.RWMutex                                           // protects the above

	conn         map[protocol.DeviceID]Connection
	deviceVer    map[protocol.DeviceID]string
	devicePaused map[protocol.DeviceID]bool
	pmut         sync.RWMutex // protects the above

	reqValidationCache map[string]time.Time // folder / file name => time when confirmed to exist
	rvmut              sync.RWMutex         // protects reqValidationCache
}
2014-01-07 22:44:21 +01:00
// symlinkWarning ensures the symlink-support warning is emitted at most once
// per process. The zero value of sync.Once is ready to use.
var symlinkWarning stdsync.Once
2014-01-06 21:31:36 +01:00
2014-01-06 11:11:18 +01:00
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
	m := &Model{
		Supervisor: suture.New("model", suture.Spec{
			// Route supervisor log output into our debug log.
			Log: func(line string) {
				if debug {
					l.Debugln(line)
				}
			},
		}),
		cfg:                cfg,
		db:                 ldb,
		finder:             db.NewBlockFinder(ldb),
		progressEmitter:    NewProgressEmitter(cfg),
		id:                 id,
		shortID:            id.Short(),
		cacheIgnoredFiles:  cfg.Options().CacheIgnoredFiles,
		deviceName:         deviceName,
		clientName:         clientName,
		clientVersion:      clientVersion,
		folderCfgs:         make(map[string]config.FolderConfiguration),
		folderFiles:        make(map[string]*db.FileSet),
		folderDevices:      make(map[string][]protocol.DeviceID),
		deviceFolders:      make(map[protocol.DeviceID][]string),
		deviceStatRefs:     make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
		folderIgnores:      make(map[string]*ignore.Matcher),
		folderRunners:      make(map[string]service),
		folderStatRefs:     make(map[string]*stats.FolderStatisticsReference),
		conn:               make(map[protocol.DeviceID]Connection),
		deviceVer:          make(map[protocol.DeviceID]string),
		devicePaused:       make(map[protocol.DeviceID]bool),
		reqValidationCache: make(map[string]time.Time),
		fmut:               sync.NewRWMutex(),
		pmut:               sync.NewRWMutex(),
		rvmut:              sync.NewRWMutex(),
	}

	// A negative ProgressUpdateIntervalS disables the progress emitter
	// entirely; otherwise it runs for the life of the process.
	if cfg.Options().ProgressUpdateIntervalS > -1 {
		go m.progressEmitter.Serve()
	}

	return m
}
2015-04-28 22:32:10 +02:00
// StartDeadlockDetector starts a deadlock detector on the models locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
2015-04-08 13:35:03 +01:00
func ( m * Model ) StartDeadlockDetector ( timeout time . Duration ) {
l . Infof ( "Starting deadlock detector with %v timeout" , timeout )
2015-04-22 23:54:31 +01:00
deadlockDetect ( m . fmut , timeout )
deadlockDetect ( m . pmut , timeout )
2015-04-08 13:35:03 +01:00
}
2015-04-28 22:32:10 +02:00
// StartFolderRW starts read/write processing on the current model. When in
2014-01-06 11:11:18 +01:00
// read/write mode the model will attempt to keep in sync with the cluster by
2014-09-28 12:00:38 +01:00
// pulling needed files from peer devices.
func ( m * Model ) StartFolderRW ( folder string ) {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
2014-09-28 12:00:38 +01:00
cfg , ok := m . folderCfgs [ folder ]
2014-09-27 14:44:15 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
panic ( "cannot start nonexistent folder " + folder )
2014-09-27 14:44:15 +02:00
}
2014-09-30 17:52:05 +02:00
_ , ok = m . folderRunners [ folder ]
if ok {
panic ( "cannot start already running folder " + folder )
}
2015-04-09 12:53:41 +02:00
p := newRWFolder ( m , m . shortID , cfg )
2014-09-30 17:52:05 +02:00
m . folderRunners [ folder ] = p
m . fmut . Unlock ( )
2014-09-27 14:44:15 +02:00
if len ( cfg . Versioning . Type ) > 0 {
factory , ok := versioner . Factories [ cfg . Versioning . Type ]
if ! ok {
l . Fatalf ( "Requested versioning type %q that does not exist" , cfg . Versioning . Type )
}
2015-06-20 20:04:47 +02:00
2015-06-12 13:04:00 +02:00
versioner := factory ( folder , cfg . Path ( ) , cfg . Versioning . Params )
if service , ok := versioner . ( suture . Service ) ; ok {
// The versioner implements the suture.Service interface, so
// expects to be run in the background in addition to being called
// when files are going to be archived.
m . Add ( service )
}
p . versioner = versioner
2014-03-29 18:53:48 +01:00
}
2014-09-27 14:44:15 +02:00
2015-06-20 20:04:47 +02:00
m . Add ( p )
2015-07-23 16:13:53 +02:00
l . Okln ( "Ready to synchronize" , folder , "(read-write)" )
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
2015-04-28 22:32:10 +02:00
// StartFolderRO starts read only processing on the current model. When in
// read only mode the model will announce files to the cluster but not pull in
// any external changes.
2014-09-28 12:00:38 +01:00
func ( m * Model ) StartFolderRO ( folder string ) {
2014-09-30 17:52:05 +02:00
m . fmut . Lock ( )
cfg , ok := m . folderCfgs [ folder ]
if ! ok {
panic ( "cannot start nonexistent folder " + folder )
}
_ , ok = m . folderRunners [ folder ]
if ok {
panic ( "cannot start already running folder " + folder )
}
2015-03-16 21:14:19 +01:00
s := newROFolder ( m , folder , time . Duration ( cfg . RescanIntervalS ) * time . Second )
2014-09-30 17:52:05 +02:00
m . folderRunners [ folder ] = s
m . fmut . Unlock ( )
2015-07-23 16:13:53 +02:00
m . Add ( s )
l . Okln ( "Ready to synchronize" , folder , "(read only; no external updates accepted)" )
2014-01-20 22:22:27 +01:00
}
2014-01-05 23:54:57 +01:00
// ConnectionInfo describes the state of a connection to a remote device as
// reported by ConnectionStats, including the embedded transfer statistics.
type ConnectionInfo struct {
	protocol.Statistics
	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          ConnectionType
}
2015-03-10 23:45:43 +01:00
func ( info ConnectionInfo ) MarshalJSON ( ) ( [ ] byte , error ) {
return json . Marshal ( map [ string ] interface { } {
"at" : info . At ,
"inBytesTotal" : info . InBytesTotal ,
"outBytesTotal" : info . OutBytesTotal ,
2015-08-23 21:56:10 +02:00
"connected" : info . Connected ,
"paused" : info . Paused ,
2015-03-10 23:45:43 +01:00
"address" : info . Address ,
"clientVersion" : info . ClientVersion ,
2015-08-23 21:56:10 +02:00
"type" : info . Type . String ( ) ,
2015-03-10 23:45:43 +01:00
} )
}
2014-09-28 12:00:38 +01:00
// ConnectionStats returns a map with connection statistics for each connected device.
2015-04-07 13:20:40 +01:00
func ( m * Model ) ConnectionStats ( ) map [ string ] interface { } {
2014-01-05 23:54:57 +01:00
type remoteAddrer interface {
RemoteAddr ( ) net . Addr
}
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-01-05 16:16:37 +01:00
2015-08-23 21:56:10 +02:00
res := make ( map [ string ] interface { } )
devs := m . cfg . Devices ( )
conns := make ( map [ string ] ConnectionInfo , len ( devs ) )
for device := range devs {
2014-01-05 23:54:57 +01:00
ci := ConnectionInfo {
2014-09-28 12:00:38 +01:00
ClientVersion : m . deviceVer [ device ] ,
2015-08-23 21:56:10 +02:00
Paused : m . devicePaused [ device ] ,
2014-01-05 23:54:57 +01:00
}
2015-08-23 21:56:10 +02:00
if conn , ok := m . conn [ device ] ; ok {
2015-07-17 21:22:07 +01:00
ci . Type = conn . Type
2015-08-23 21:56:10 +02:00
ci . Connected = ok
ci . Statistics = conn . Statistics ( )
if addr := conn . RemoteAddr ( ) ; addr != nil {
ci . Address = addr . String ( )
}
2014-01-05 23:54:57 +01:00
}
2014-02-13 12:41:37 +01:00
2015-04-07 13:20:40 +01:00
conns [ device . String ( ) ] = ci
2013-12-30 09:30:29 -05:00
}
2014-01-17 20:06:44 -07:00
2015-04-07 13:20:40 +01:00
res [ "connections" ] = conns
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-03-28 14:36:57 +01:00
2014-05-24 21:34:11 +02:00
in , out := protocol . TotalInOut ( )
res [ "total" ] = ConnectionInfo {
Statistics : protocol . Statistics {
At : time . Now ( ) ,
2014-06-01 21:56:05 +02:00
InBytesTotal : in ,
OutBytesTotal : out ,
2014-05-24 21:34:11 +02:00
} ,
}
2014-01-05 16:16:37 +01:00
return res
2013-12-30 09:30:29 -05:00
}
2015-04-28 22:32:10 +02:00
// DeviceStatistics returns statistics about each device
2014-09-28 12:00:38 +01:00
func ( m * Model ) DeviceStatistics ( ) map [ string ] stats . DeviceStatistics {
var res = make ( map [ string ] stats . DeviceStatistics )
2014-10-06 09:25:45 +02:00
for id := range m . cfg . Devices ( ) {
res [ id . String ( ) ] = m . deviceStatRef ( id ) . GetStatistics ( )
2014-08-21 23:45:40 +01:00
}
return res
}
2015-04-28 22:32:10 +02:00
// FolderStatistics returns statistics about each folder
2014-12-07 20:21:12 +00:00
func ( m * Model ) FolderStatistics ( ) map [ string ] stats . FolderStatistics {
var res = make ( map [ string ] stats . FolderStatistics )
for id := range m . cfg . Folders ( ) {
res [ id ] = m . folderStatRef ( id ) . GetStatistics ( )
}
return res
}
2015-04-28 22:32:10 +02:00
// Completion returns the completion status, in percent, for the given device
// and folder.
2014-09-28 12:00:38 +01:00
func ( m * Model ) Completion ( device protocol . DeviceID , folder string ) float64 {
2014-07-29 11:06:52 +02:00
var tot int64
2014-08-05 20:16:25 +02:00
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
rf , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-08-05 20:16:25 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
return 0 // Folder doesn't exist, so we hardly have any of it
2014-08-05 20:16:25 +02:00
}
2015-01-12 14:50:30 +01:00
rf . WithGlobalTruncated ( func ( f db . FileIntf ) bool {
2014-08-12 13:53:31 +02:00
if ! f . IsDeleted ( ) {
tot += f . Size ( )
2014-07-29 11:06:52 +02:00
}
return true
} )
2014-08-05 20:16:25 +02:00
if tot == 0 {
2014-09-28 12:00:38 +01:00
return 100 // Folder is empty, so we have all of it
2014-08-05 20:16:25 +02:00
}
2014-07-29 11:06:52 +02:00
var need int64
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( device , func ( f db . FileIntf ) bool {
2014-08-12 13:53:31 +02:00
if ! f . IsDeleted ( ) {
need += f . Size ( )
2014-07-29 11:06:52 +02:00
}
return true
} )
2014-08-12 13:53:31 +02:00
res := 100 * ( 1 - float64 ( need ) / float64 ( tot ) )
if debug {
2014-09-28 12:00:38 +01:00
l . Debugf ( "%v Completion(%s, %q): %f (%d / %d)" , m , device , folder , res , need , tot )
2014-08-12 13:53:31 +02:00
}
return res
2014-07-29 11:06:52 +02:00
}
2014-07-12 23:06:48 +02:00
func sizeOf ( fs [ ] protocol . FileInfo ) ( files , deleted int , bytes int64 ) {
2014-03-28 14:36:57 +01:00
for _ , f := range fs {
2014-07-06 14:46:48 +02:00
fs , de , by := sizeOfFile ( f )
files += fs
deleted += de
bytes += by
}
return
}
2015-01-12 14:50:30 +01:00
func sizeOfFile ( f db . FileIntf ) ( files , deleted int , bytes int64 ) {
2014-08-12 13:53:31 +02:00
if ! f . IsDeleted ( ) {
2014-07-06 14:46:48 +02:00
files ++
} else {
deleted ++
2013-12-30 09:30:29 -05:00
}
2014-08-12 13:53:31 +02:00
bytes += f . Size ( )
2014-01-05 16:16:37 +01:00
return
}
2013-12-30 09:30:29 -05:00
2014-03-28 14:36:57 +01:00
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
2015-01-09 08:18:42 +01:00
func ( m * Model ) GlobalSize ( folder string ) ( nfiles , deleted int , bytes int64 ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2015-01-12 14:50:30 +01:00
rf . WithGlobalTruncated ( func ( f db . FileIntf ) bool {
2014-07-06 14:46:48 +02:00
fs , de , by := sizeOfFile ( f )
2015-01-09 08:18:42 +01:00
nfiles += fs
2014-07-06 14:46:48 +02:00
deleted += de
bytes += by
return true
} )
2014-03-29 18:53:48 +01:00
}
2014-07-06 14:46:48 +02:00
return
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
// LocalSize returns the number of files, deleted files and total bytes for all
2014-09-28 12:00:38 +01:00
// files in the local folder.
2015-01-09 08:18:42 +01:00
func ( m * Model ) LocalSize ( folder string ) ( nfiles , deleted int , bytes int64 ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2015-01-12 14:50:30 +01:00
rf . WithHaveTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
2014-09-04 22:29:53 +02:00
if f . IsInvalid ( ) {
return true
}
2014-07-06 14:46:48 +02:00
fs , de , by := sizeOfFile ( f )
2015-01-09 08:18:42 +01:00
nfiles += fs
2014-07-06 14:46:48 +02:00
deleted += de
bytes += by
return true
} )
2014-03-29 18:53:48 +01:00
}
2014-07-06 23:15:28 +02:00
return
2014-01-06 06:38:01 +01:00
}
2014-05-19 22:31:28 +02:00
// NeedSize returns the number and total size of currently needed files.
2015-01-09 08:18:42 +01:00
func ( m * Model ) NeedSize ( folder string ) ( nfiles int , bytes int64 ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
2014-07-15 17:54:00 +02:00
fs , de , by := sizeOfFile ( f )
2015-01-09 08:18:42 +01:00
nfiles += fs + de
2014-07-15 17:54:00 +02:00
bytes += by
return true
} )
}
2014-11-16 23:18:59 +00:00
bytes -= m . progressEmitter . BytesCompleted ( folder )
2014-08-12 13:53:31 +02:00
if debug {
2015-01-09 08:18:42 +01:00
l . Debugf ( "%v NeedSize(%q): %d %d" , m , folder , nfiles , bytes )
2014-08-12 13:53:31 +02:00
}
2014-07-15 17:54:00 +02:00
return
2013-12-23 12:12:44 -05:00
}
2015-04-28 22:32:10 +02:00
// NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
func (m *Model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	total := 0

	rf, ok := m.folderFiles[folder]
	if !ok {
		return nil, nil, nil, 0
	}

	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	// Pagination spans the concatenation of the three lists: skip the first
	// (page-1)*perpage items overall, then collect up to perpage items.
	// getChunk consumes from skip/get as each list is sliced.
	skip := (page - 1) * perpage
	get := perpage

	runner, ok := m.folderRunners[folder]
	if ok {
		// Ask the running puller for its in-progress and queued job names,
		// and resolve each name to its global file record. Names that have
		// disappeared from the global set are left as zero values.
		allProgressNames, allQueuedNames := runner.Jobs()
		var progressNames, queuedNames []string
		progressNames, skip, get = getChunk(allProgressNames, skip, get)
		queuedNames, skip, get = getChunk(allQueuedNames, skip, get)
		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))
		for i, name := range progressNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}
		for i, name := range queuedNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}
	}
	// Fill the remainder of the page from the needed files not already
	// listed above, while counting the grand total of needed files.
	rest = make([]db.FileInfoTruncated, 0, perpage)
	rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
		total++
		if skip > 0 {
			skip--
			return true
		}
		if get > 0 {
			ft := f.(db.FileInfoTruncated)
			if _, ok := seen[ft.Name]; !ok {
				rest = append(rest, ft)
				get--
			}
		}
		return true
	})
	return progress, queued, rest, total
}
2014-09-28 12:00:38 +01:00
// Index is called when a new device is connected and we receive their full index.
2014-01-06 11:11:18 +01:00
// Implements the protocol.Model interface.
2015-01-14 22:11:31 +00:00
func ( m * Model ) Index ( deviceID protocol . DeviceID , folder string , fs [ ] protocol . FileInfo , flags uint32 , options [ ] protocol . Option ) {
2015-01-14 22:28:19 +00:00
if flags != 0 {
l . Warnln ( "protocol error: unknown flags 0x%x in Index message" , flags )
return
}
2014-05-15 00:26:55 -03:00
if debug {
2014-09-28 12:00:38 +01:00
l . Debugf ( "IDX(in): %s %q: %d files" , deviceID , folder , len ( fs ) )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:00:38 +01:00
if ! m . folderSharedWith ( folder , deviceID ) {
events . Default . Log ( events . FolderRejected , map [ string ] string {
"folder" : folder ,
"device" : deviceID . String ( ) ,
2014-08-18 23:34:03 +02:00
} )
2014-12-27 23:12:12 +00:00
l . Infof ( "Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration." , folder , deviceID )
2014-06-06 21:48:29 +02:00
return
}
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-07-21 13:14:33 +02:00
cfg := m . folderCfgs [ folder ]
2014-09-28 12:00:38 +01:00
files , ok := m . folderFiles [ folder ]
2015-05-07 22:45:07 +02:00
runner := m . folderRunners [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-09-04 22:29:53 +02:00
2015-05-07 22:45:07 +02:00
if runner != nil {
// Runner may legitimately not be set if this is the "cleanup" Index
// message at startup.
defer runner . IndexUpdated ( )
}
2014-09-04 22:29:53 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
l . Fatalf ( "Index for nonexistant folder %q" , folder )
2013-12-15 11:43:31 +01:00
}
2014-07-13 21:07:24 +02:00
2015-07-21 13:14:33 +02:00
fs = filterIndex ( folder , fs , cfg . IgnoreDelete )
2014-09-28 12:00:38 +01:00
files . Replace ( deviceID , fs )
2014-09-04 22:29:53 +02:00
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . RemoteIndexUpdated , map [ string ] interface { } {
2014-09-28 12:05:25 +01:00
"device" : deviceID . String ( ) ,
"folder" : folder ,
2014-07-17 13:38:36 +02:00
"items" : len ( fs ) ,
2014-09-28 12:00:38 +01:00
"version" : files . LocalVersion ( deviceID ) ,
2014-07-13 21:07:24 +02:00
} )
2013-12-28 08:10:36 -05:00
}
2014-09-28 12:00:38 +01:00
// IndexUpdate is called for incremental updates to connected devices' indexes.
2014-01-06 11:11:18 +01:00
// Implements the protocol.Model interface.
2015-01-14 22:11:31 +00:00
func ( m * Model ) IndexUpdate ( deviceID protocol . DeviceID , folder string , fs [ ] protocol . FileInfo , flags uint32 , options [ ] protocol . Option ) {
2015-01-14 22:28:19 +00:00
if flags != 0 {
l . Warnln ( "protocol error: unknown flags 0x%x in IndexUpdate message" , flags )
return
}
2014-05-15 00:26:55 -03:00
if debug {
2014-09-28 12:00:38 +01:00
l . Debugf ( "%v IDXUP(in): %s / %q: %d files" , m , deviceID , folder , len ( fs ) )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:00:38 +01:00
if ! m . folderSharedWith ( folder , deviceID ) {
l . Infof ( "Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration." , folder , deviceID )
2014-06-06 21:48:29 +02:00
return
}
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-05-07 22:45:07 +02:00
files := m . folderFiles [ folder ]
2015-07-21 13:14:33 +02:00
cfg := m . folderCfgs [ folder ]
2015-05-07 22:45:07 +02:00
runner , ok := m . folderRunners [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-09-04 22:29:53 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
l . Fatalf ( "IndexUpdate for nonexistant folder %q" , folder )
2013-12-28 08:10:36 -05:00
}
2014-07-13 21:07:24 +02:00
2015-07-21 13:14:33 +02:00
fs = filterIndex ( folder , fs , cfg . IgnoreDelete )
2014-09-28 12:00:38 +01:00
files . Update ( deviceID , fs )
2014-09-04 22:29:53 +02:00
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . RemoteIndexUpdated , map [ string ] interface { } {
2014-09-28 12:05:25 +01:00
"device" : deviceID . String ( ) ,
"folder" : folder ,
2014-07-17 13:38:36 +02:00
"items" : len ( fs ) ,
2014-09-28 12:00:38 +01:00
"version" : files . LocalVersion ( deviceID ) ,
2014-07-13 21:07:24 +02:00
} )
2015-05-07 22:45:07 +02:00
runner . IndexUpdated ( )
2014-01-09 10:59:09 +01:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) folderSharedWith ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
for _ , nfolder := range m . deviceFolders [ deviceID ] {
if nfolder == folder {
2014-06-06 21:48:29 +02:00
return true
}
}
return false
}
2014-09-28 12:00:38 +01:00
// ClusterConfig is called when a ClusterConfigMessage is received from a
// connected device. It records the remote client name/version, logs the
// connection event, optionally adopts the announced device name, and — if the
// remote device is a configured introducer — adds the devices and folder
// shares it vouches for to our configuration.
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfigMessage) {
	m.pmut.Lock()
	if cm.ClientName == "syncthing" {
		// Official client: the bare version string is enough.
		m.deviceVer[deviceID] = cm.ClientVersion
	} else {
		m.deviceVer[deviceID] = cm.ClientName + " " + cm.ClientVersion
	}

	event := map[string]string{
		"id":            deviceID.String(),
		"clientName":    cm.ClientName,
		"clientVersion": cm.ClientVersion,
	}

	if conn, ok := m.conn[deviceID]; ok {
		event["type"] = conn.Type.String()
		addr := conn.RemoteAddr()
		if addr != nil {
			event["addr"] = addr.String()
		}
	}

	m.pmut.Unlock()

	events.Default.Log(events.DeviceConnected, event)

	l.Infof(`Device %s client is "%s %s"`, deviceID, cm.ClientName, cm.ClientVersion)

	var changed bool

	// If the remote announced a device name and we have none configured for
	// it yet, adopt the announced one.
	if name := cm.GetOption("name"); name != "" {
		l.Infof("Device %s name is %q", deviceID, name)
		device, ok := m.cfg.Devices()[deviceID]
		if ok && device.Name == "" {
			device.Name = name
			m.cfg.SetDevice(device)
			changed = true
		}
	}

	if m.cfg.Devices()[deviceID].Introducer {
		// This device is an introducer. Go through the announced lists of folders
		// and devices and add what we are missing.
		//
		// NOTE(review): m.folderDevices and m.deviceFolders are documented as
		// protected by fmut, but are read and mutated below without holding
		// it — confirm whether this is a latent race.

		for _, folder := range cm.Folders {
			// If we don't have this folder yet, skip it. Ideally, we'd
			// offer up something in the GUI to create the folder, but for the
			// moment we only handle folders that we already have.
			if _, ok := m.folderDevices[folder.ID]; !ok {
				continue
			}

		nextDevice:
			for _, device := range folder.Devices {
				var id protocol.DeviceID
				copy(id[:], device.ID)

				if _, ok := m.cfg.Devices()[id]; !ok {
					// The device is currently unknown. Add it to the config.

					l.Infof("Adding device %v to config (vouched for by introducer %v)", id, deviceID)
					newDeviceCfg := config.DeviceConfiguration{
						DeviceID:    id,
						Compression: m.cfg.Devices()[deviceID].Compression,
						Addresses:   []string{"dynamic"},
					}
					// The introducers' introducers are also our introducers.
					if device.Flags&protocol.FlagIntroducer != 0 {
						l.Infof("Device %v is now also an introducer", id)
						newDeviceCfg.Introducer = true
					}

					m.cfg.SetDevice(newDeviceCfg)
					changed = true
				}

				for _, er := range m.deviceFolders[id] {
					if er == folder.ID {
						// We already share the folder with this device, so
						// nothing to do.
						continue nextDevice
					}
				}

				// We don't yet share this folder with this device. Add the device
				// to sharing list of the folder.

				l.Infof("Adding device %v to share %q (vouched for by introducer %v)", id, folder.ID, deviceID)

				m.deviceFolders[id] = append(m.deviceFolders[id], folder.ID)
				m.folderDevices[folder.ID] = append(m.folderDevices[folder.ID], id)

				folderCfg := m.cfg.Folders()[folder.ID]
				folderCfg.Devices = append(folderCfg.Devices, config.FolderDeviceConfiguration{
					DeviceID: id,
				})
				m.cfg.SetFolder(folderCfg)
				changed = true
			}
		}
	}

	// Persist any configuration changes made above.
	if changed {
		m.cfg.Save()
	}
}
2014-01-20 22:22:27 +01:00
// Close removes the peer from the model and closes the underlying connection if possible.
2014-01-06 11:11:18 +01:00
// Implements the protocol.Model interface.
2014-09-28 12:00:38 +01:00
func ( m * Model ) Close ( device protocol . DeviceID , err error ) {
l . Infof ( "Connection to %s closed: %v" , device , err )
events . Default . Log ( events . DeviceDisconnected , map [ string ] string {
"id" : device . String ( ) ,
2014-07-13 21:07:24 +02:00
"error" : err . Error ( ) ,
} )
2014-02-09 23:13:06 +01:00
2014-07-15 13:04:37 +02:00
m . pmut . Lock ( )
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
for _ , folder := range m . deviceFolders [ device ] {
m . folderFiles [ folder ] . Replace ( device , nil )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-01-20 22:22:27 +01:00
2015-06-28 16:05:29 +01:00
conn , ok := m . conn [ device ]
2014-01-01 08:09:17 -05:00
if ok {
2015-07-22 09:02:55 +02:00
closeRawConn ( conn )
2013-12-30 21:21:57 -05:00
}
2015-06-28 16:05:29 +01:00
delete ( m . conn , device )
2014-09-28 12:00:38 +01:00
delete ( m . deviceVer , device )
2014-01-17 20:06:44 -07:00
m . pmut . Unlock ( )
2013-12-15 11:43:31 +01:00
}
2014-01-06 11:11:18 +01:00
// Request returns the specified data segment by reading it from local disk.
// The data is read into buf, whose length determines the read size. Returns
// protocol.ErrNoSuchFile / protocol.ErrInvalid for requests we cannot or
// should not serve, or the underlying I/O error otherwise.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, flags uint32, options []protocol.Option, buf []byte) error {
	if offset < 0 {
		return protocol.ErrNoSuchFile
	}

	if !m.folderSharedWith(folder, deviceID) {
		l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder)
		return protocol.ErrNoSuchFile
	}

	if flags != 0 {
		// We don't currently support or expect any flags.
		return fmt.Errorf("protocol error: unknown flags 0x%x in Request message", flags)
	}

	// Verify that the requested file exists in the local model. We only need
	// to validate this file if we haven't done so recently, so we keep a
	// cache of successfull results. "Recently" can be quite a long time, as
	// we remove validation cache entries when we detect local changes. If
	// we're out of sync here and the file actually doesn't exist any more, or
	// has shrunk or something, then we'll anyway get a read error that we
	// pass on to the other side.

	m.rvmut.RLock()
	validated := m.reqValidationCache[folder+"/"+name]
	m.rvmut.RUnlock()

	if time.Since(validated) > reqValidationTime {
		// Cache entry is missing or stale; validate against the database.
		m.fmut.RLock()
		folderFiles, ok := m.folderFiles[folder]
		m.fmut.RUnlock()

		if !ok {
			l.Warnf("Request from %s for file %s in nonexistent folder %q", deviceID, name, folder)
			return protocol.ErrNoSuchFile
		}

		// This call is really expensive for large files, as we load the full
		// block list which may be megabytes and megabytes of data to allocate
		// space for, read, and deserialize.
		lf, ok := folderFiles.Get(protocol.LocalDeviceID, name)
		if !ok {
			return protocol.ErrNoSuchFile
		}

		if lf.IsInvalid() || lf.IsDeleted() {
			if debug {
				l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, len(buf), lf)
			}
			return protocol.ErrInvalid
		}
		if offset > lf.Size() {
			if debug {
				l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, len(buf))
			}
			return protocol.ErrNoSuchFile
		}

		// Validation succeeded; record it so subsequent requests for the same
		// file skip the database round trip.
		m.rvmut.Lock()
		m.reqValidationCache[folder+"/"+name] = time.Now()
		if len(m.reqValidationCache) > reqValidationCacheSize {
			// Don't let the cache grow infinitely
			for name, validated := range m.reqValidationCache {
				if time.Since(validated) > time.Minute {
					delete(m.reqValidationCache, name)
				}
			}

			if len(m.reqValidationCache) > reqValidationCacheSize*9/10 {
				// The first clean didn't help much, we're still over 90%
				// full; we may have synced a lot of files lately. Prune the
				// cache more aggressively by removing every other item so we
				// don't get stuck doing useless cache cleaning.
				i := 0
				for name := range m.reqValidationCache {
					if i%2 == 0 {
						delete(m.reqValidationCache, name)
					}
					i++
				}
			}
		}
		m.rvmut.Unlock()
	}

	if debug && deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
	}

	m.fmut.RLock()
	fn := filepath.Join(m.folderCfgs[folder].Path(), name)
	m.fmut.RUnlock()

	var reader io.ReaderAt
	var err error
	// Symlinks are served by returning the link target as the file content;
	// note that the inner err here intentionally shadows the outer one.
	if info, err := os.Lstat(fn); err == nil && info.Mode()&os.ModeSymlink != 0 {
		target, _, err := symlinks.Read(fn)
		if err != nil {
			return err
		}
		reader = strings.NewReader(target)
	} else {
		// Cannot easily cache fd's because we might need to delete the file
		// at any moment.
		reader, err = os.Open(fn)
		if err != nil {
			return err
		}

		defer reader.(*os.File).Close()
	}

	_, err = reader.ReadAt(buf, offset)
	if err != nil {
		return err
	}

	return nil
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentFolderFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
f , ok := fs . Get ( protocol . LocalDeviceID , file )
2015-01-06 22:12:45 +01:00
return f , ok
2014-04-01 23:18:32 +02:00
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentGlobalFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
f , ok := fs . GetGlobal ( file )
2015-01-06 22:12:45 +01:00
return f , ok
2014-04-01 23:18:32 +02:00
}
2014-03-29 18:53:48 +01:00
// cFiler answers "what do we currently have for this file" queries against
// the model, for use during scanning. Field order matters: it is constructed
// with a positional composite literal elsewhere in this file.
type cFiler struct {
	m *Model // model to query
	r string // folder ID to query within
}
2014-03-16 08:14:55 +01:00
// CurrentFile returns the local version of the named file in the folder this
// cFiler was created for.
// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	return cf.m.CurrentFolderFile(cf.r, file)
}
2014-09-28 12:00:38 +01:00
// ConnectedTo returns true if we are connected to the named device.
func ( m * Model ) ConnectedTo ( deviceID protocol . DeviceID ) bool {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
_ , ok := m . conn [ deviceID ]
2014-09-20 19:14:45 +02:00
m . pmut . RUnlock ( )
2014-09-10 11:29:01 +02:00
if ok {
2014-09-28 12:00:38 +01:00
m . deviceWasSeen ( deviceID )
2014-09-10 11:29:01 +02:00
}
2014-01-06 11:11:18 +01:00
return ok
}
2014-11-08 22:12:18 +01:00
func ( m * Model ) GetIgnores ( folder string ) ( [ ] string , [ ] string , error ) {
2014-09-14 23:03:53 +01:00
var lines [ ] string
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
cfg , ok := m . folderCfgs [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-14 23:03:53 +01:00
if ! ok {
2014-11-08 22:12:18 +01:00
return lines , nil , fmt . Errorf ( "Folder %s does not exist" , folder )
2014-09-14 23:03:53 +01:00
}
2015-04-05 22:52:22 +02:00
fd , err := os . Open ( filepath . Join ( cfg . Path ( ) , ".stignore" ) )
2014-09-14 23:03:53 +01:00
if err != nil {
if os . IsNotExist ( err ) {
2014-11-08 22:12:18 +01:00
return lines , nil , nil
2014-09-14 23:03:53 +01:00
}
l . Warnln ( "Loading .stignore:" , err )
2014-11-08 22:12:18 +01:00
return lines , nil , err
2014-09-14 23:03:53 +01:00
}
defer fd . Close ( )
scanner := bufio . NewScanner ( fd )
for scanner . Scan ( ) {
lines = append ( lines , strings . TrimSpace ( scanner . Text ( ) ) )
}
2014-11-29 22:29:49 +01:00
m . fmut . RLock ( )
2015-04-27 20:49:10 +01:00
patterns := m . folderIgnores [ folder ] . Patterns ( )
2014-11-29 22:29:49 +01:00
m . fmut . RUnlock ( )
2014-11-08 22:12:18 +01:00
return lines , patterns , nil
2014-09-14 23:03:53 +01:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) SetIgnores ( folder string , content [ ] string ) error {
cfg , ok := m . folderCfgs [ folder ]
2014-09-14 23:03:53 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return fmt . Errorf ( "Folder %s does not exist" , folder )
2014-09-14 23:03:53 +01:00
}
2015-08-30 12:59:01 +01:00
path := filepath . Join ( cfg . Path ( ) , ".stignore" )
fd , err := osutil . CreateAtomic ( path , 0644 )
2014-09-14 23:03:53 +01:00
if err != nil {
l . Warnln ( "Saving .stignore:" , err )
return err
}
for _ , line := range content {
2015-07-12 01:03:40 +10:00
fmt . Fprintln ( fd , line )
2014-09-14 23:03:53 +01:00
}
2015-07-12 01:03:40 +10:00
if err := fd . Close ( ) ; err != nil {
2014-09-14 23:03:53 +01:00
l . Warnln ( "Saving .stignore:" , err )
return err
}
2015-08-30 12:59:01 +01:00
osutil . HideFile ( path )
2014-09-14 23:03:53 +01:00
2014-09-28 12:00:38 +01:00
return m . ScanFolder ( folder )
2014-09-14 23:03:53 +01:00
}
2014-01-06 11:11:18 +01:00
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
// Panics if a connection for the device already exists; callers are expected
// to have handled duplicate connections before calling this.
func (m *Model) AddConnection(conn Connection) {
	deviceID := conn.ID()

	m.pmut.Lock()
	if _, ok := m.conn[deviceID]; ok {
		panic("add existing device")
	}
	m.conn[deviceID] = conn

	conn.Start()

	// Send our cluster configuration before any index data.
	cm := m.clusterConfig(deviceID)
	conn.ClusterConfig(cm)

	// Kick off one index sender goroutine per folder shared with this device.
	// fmut is taken inside pmut here; keep that nesting order.
	m.fmut.RLock()
	for _, folder := range m.deviceFolders[deviceID] {
		fs := m.folderFiles[folder]
		go sendIndexes(conn, folder, fs, m.folderIgnores[folder])
	}
	m.fmut.RUnlock()
	m.pmut.Unlock()

	m.deviceWasSeen(deviceID)
}
2015-08-23 21:56:10 +02:00
// PauseDevice marks the device as paused, closes any current connection to
// it, and emits a DevicePaused event.
func (m *Model) PauseDevice(device protocol.DeviceID) {
	m.pmut.Lock()
	m.devicePaused[device] = true
	_, connected := m.conn[device]
	m.pmut.Unlock()

	if connected {
		m.Close(device, errors.New("device paused"))
	}
	events.Default.Log(events.DevicePaused, map[string]string{"device": device.String()})
}
// ResumeDevice clears the paused flag for the device and emits a
// DeviceResumed event. It does not itself initiate a reconnection.
func (m *Model) ResumeDevice(device protocol.DeviceID) {
	m.pmut.Lock()
	m.devicePaused[device] = false
	m.pmut.Unlock()

	events.Default.Log(events.DeviceResumed, map[string]string{"device": device.String()})
}
// IsPaused returns whether the given device is currently marked as paused.
func (m *Model) IsPaused(device protocol.DeviceID) bool {
	// This is a pure read of devicePaused; a read lock suffices, consistent
	// with the other read-only pmut accessors (ConnectedTo, requestGlobal).
	m.pmut.RLock()
	paused := m.devicePaused[device]
	m.pmut.RUnlock()
	return paused
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) deviceStatRef ( deviceID protocol . DeviceID ) * stats . DeviceStatisticsReference {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-09-20 19:14:45 +02:00
2014-09-28 12:00:38 +01:00
if sr , ok := m . deviceStatRefs [ deviceID ] ; ok {
2014-09-20 19:14:45 +02:00
return sr
}
2014-12-08 16:36:15 +01:00
2015-09-04 13:22:59 +02:00
sr := stats . NewDeviceStatisticsReference ( m . db , deviceID . String ( ) )
2014-12-08 16:36:15 +01:00
m . deviceStatRefs [ deviceID ] = sr
return sr
2014-09-20 19:14:45 +02:00
}
2014-09-28 12:00:38 +01:00
// deviceWasSeen records "now" as the last-seen time for the device in its
// statistics reference.
func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
	m.deviceStatRef(deviceID).WasSeen()
}
2014-12-07 20:21:12 +00:00
func ( m * Model ) folderStatRef ( folder string ) * stats . FolderStatisticsReference {
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-12-16 23:33:28 +01:00
sr , ok := m . folderStatRefs [ folder ]
if ! ok {
2014-12-07 20:21:12 +00:00
sr = stats . NewFolderStatisticsReference ( m . db , folder )
m . folderStatRefs [ folder ] = sr
}
2014-12-16 23:33:28 +01:00
return sr
2014-12-07 20:21:12 +00:00
}
2015-06-16 12:12:34 +01:00
// receivedFile records the name and deleted-status of a received file in the
// folder's statistics.
func (m *Model) receivedFile(folder string, file protocol.FileInfo) {
	m.folderStatRef(folder).ReceivedFile(file.Name, file.IsDeleted())
}
2015-01-12 14:52:24 +01:00
// sendIndexes streams the local index for one folder to the given connection:
// first a full initial index, then incremental updates whenever the local
// index advances. It runs in its own goroutine (started from AddConnection)
// and exits when a send fails, typically because the connection closed.
func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) {
	deviceID := conn.ID()
	name := conn.Name()
	var err error

	if debug {
		l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder)
	}

	// Initial full index; minLocalVer tracks the highest local version we
	// have sent so far.
	minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores)

	sub := events.Default.Subscribe(events.LocalIndexUpdated)
	defer events.Default.Unsubscribe(sub)

	for err == nil {
		// While we have sent a localVersion at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if fs.LocalVersion(protocol.LocalDeviceID) <= minLocalVer {
			sub.Poll(time.Minute)
			continue
		}

		minLocalVer, err = sendIndexTo(false, minLocalVer, conn, folder, fs, ignores)

		// Wait a short amount of time before entering the next loop. If there
		// are continous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}

	if debug {
		l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
	}
}
2014-07-15 13:04:37 +02:00
2015-01-18 02:12:06 +01:00
// sendIndexTo sends all local files with a local version strictly greater
// than minLocalVer to the connection, batched to stay under the index message
// size targets. When initial is true the first message is a full Index rather
// than an IndexUpdate. It returns the highest local version seen (so the
// caller can pass it back as the next minLocalVer) and any send error.
func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (int64, error) {
	deviceID := conn.ID()
	name := conn.Name()
	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	currentBatchSize := 0 // estimated wire size of the current batch, in bytes
	maxLocalVer := int64(0)
	var err error

	fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		f := fi.(protocol.FileInfo)

		// Skip files the remote already has (at or below minLocalVer).
		if f.LocalVersion <= minLocalVer {
			return true
		}

		if f.LocalVersion > maxLocalVer {
			maxLocalVer = f.LocalVersion
		}

		if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
			if debug {
				l.Debugln("not sending update for ignored/unsupported symlink", f)
			}
			return true
		}

		// Flush the batch when it reaches the count or size limit. Returning
		// false from the callback aborts the iteration on send error; err is
		// captured for the outer function.
		if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
			if initial {
				if err = conn.Index(folder, batch, 0, nil); err != nil {
					return false
				}
				if debug {
					l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
				}
				// Only the first message is a full Index; the rest are updates.
				initial = false
			} else {
				if err = conn.IndexUpdate(folder, batch, 0, nil); err != nil {
					return false
				}
				if debug {
					l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
				}
			}

			batch = make([]protocol.FileInfo, 0, indexBatchSize)
			currentBatchSize = 0
		}

		batch = append(batch, f)
		currentBatchSize += indexPerFileSize + len(f.Blocks)*indexPerBlockSize
		return true
	})

	// Send whatever remains. If initial is still true we never flushed at
	// all, so the remainder (possibly empty) must go out as the full Index.
	if initial && err == nil {
		err = conn.Index(folder, batch, 0, nil)
		if debug && err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
		}
	} else if len(batch) > 0 && err == nil {
		err = conn.IndexUpdate(folder, batch, 0, nil)
		if debug && err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
		}
	}

	return maxLocalVer, err
}
2015-04-05 15:34:29 +02:00
func ( m * Model ) updateLocals ( folder string , fs [ ] protocol . FileInfo ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-06-16 08:30:15 +02:00
files := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-06-16 08:30:15 +02:00
files . Update ( protocol . LocalDeviceID , fs )
2015-05-25 11:05:12 +02:00
m . rvmut . Lock ( )
for _ , f := range fs {
delete ( m . reqValidationCache , folder + "/" + f . Name )
}
m . rvmut . Unlock ( )
2015-04-05 15:34:29 +02:00
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . LocalIndexUpdated , map [ string ] interface { } {
2015-06-16 08:30:15 +02:00
"folder" : folder ,
"items" : len ( fs ) ,
"version" : files . LocalVersion ( protocol . LocalDeviceID ) ,
2014-07-17 13:38:36 +02:00
} )
2014-03-28 14:36:57 +01:00
}
2015-01-14 22:11:31 +00:00
func ( m * Model ) requestGlobal ( deviceID protocol . DeviceID , folder , name string , offset int64 , size int , hash [ ] byte , flags uint32 , options [ ] protocol . Option ) ( [ ] byte , error ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
nc , ok := m . conn [ deviceID ]
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-01-06 11:11:18 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return nil , fmt . Errorf ( "requestGlobal: no such device: %s" , deviceID )
2014-01-06 11:11:18 +01:00
}
2014-05-15 00:26:55 -03:00
if debug {
2015-01-14 22:11:31 +00:00
l . Debugf ( "%v REQ(out): %s: %q / %q o=%d s=%d h=%x f=%x op=%s" , m , deviceID , folder , name , offset , size , hash , flags , options )
2014-01-06 11:11:18 +01:00
}
2015-01-14 22:11:31 +00:00
return nc . Request ( folder , name , offset , size , hash , flags , options )
2014-01-06 11:11:18 +01:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) AddFolder ( cfg config . FolderConfiguration ) {
2014-05-23 14:31:16 +02:00
if len ( cfg . ID ) == 0 {
2014-09-28 12:00:38 +01:00
panic ( "cannot add empty folder id" )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
2014-09-28 12:00:38 +01:00
m . folderCfgs [ cfg . ID ] = cfg
2015-01-12 14:52:24 +01:00
m . folderFiles [ cfg . ID ] = db . NewFileSet ( cfg . ID , m . db )
2013-12-15 11:43:31 +01:00
2014-09-28 12:00:38 +01:00
m . folderDevices [ cfg . ID ] = make ( [ ] protocol . DeviceID , len ( cfg . Devices ) )
for i , device := range cfg . Devices {
m . folderDevices [ cfg . ID ] [ i ] = device . DeviceID
m . deviceFolders [ device . DeviceID ] = append ( m . deviceFolders [ device . DeviceID ] , cfg . ID )
2014-03-29 18:53:48 +01:00
}
2014-01-23 22:20:15 +01:00
2015-07-23 16:13:53 +02:00
ignores := ignore . New ( m . cacheIgnoredFiles )
2015-04-05 22:52:22 +02:00
_ = ignores . Load ( filepath . Join ( cfg . Path ( ) , ".stignore" ) ) // Ignore error, there might not be an .stignore
2014-11-22 02:19:16 +00:00
m . folderIgnores [ cfg . ID ] = ignores
2014-09-28 12:39:39 +01:00
m . fmut . Unlock ( )
2014-03-29 18:53:48 +01:00
}
2014-01-23 22:20:15 +01:00
2015-02-11 19:52:59 +01:00
func ( m * Model ) ScanFolders ( ) map [ string ] error {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-13 05:12:01 +09:00
folders := make ( [ ] string , 0 , len ( m . folderCfgs ) )
2014-09-28 12:00:38 +01:00
for folder := range m . folderCfgs {
folders = append ( folders , folder )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
errors := make ( map [ string ] error , len ( m . folderCfgs ) )
2015-04-22 23:54:31 +01:00
errorsMut := sync . NewMutex ( )
2015-02-11 19:52:59 +01:00
2015-04-22 23:54:31 +01:00
wg := sync . NewWaitGroup ( )
2014-09-28 12:00:38 +01:00
wg . Add ( len ( folders ) )
for _ , folder := range folders {
folder := folder
2014-05-13 20:42:12 -03:00
go func ( ) {
2014-09-28 12:00:38 +01:00
err := m . ScanFolder ( folder )
2014-05-28 06:55:30 +02:00
if err != nil {
2015-02-11 19:52:59 +01:00
errorsMut . Lock ( )
errors [ folder ] = err
errorsMut . Unlock ( )
2015-04-13 05:12:01 +09:00
2015-03-28 14:25:42 +00:00
// Potentially sets the error twice, once in the scanner just
// by doing a check, and once here, if the error returned is
// the same one as returned by CheckFolderHealth, though
2015-04-13 05:12:01 +09:00
// duplicate set is handled by setError.
m . fmut . RLock ( )
srv := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
srv . setError ( err )
2014-05-28 06:55:30 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Done ( )
} ( )
2014-04-14 09:58:17 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Wait ( )
2015-02-11 19:52:59 +01:00
return errors
2014-03-29 18:53:48 +01:00
}
2013-12-15 11:43:31 +01:00
2014-09-28 12:00:38 +01:00
// ScanFolder rescans the entire folder for local changes. It is equivalent to
// ScanFolderSubs with no subdirectory restriction.
func (m *Model) ScanFolder(folder string) error {
	return m.ScanFolderSubs(folder, nil)
}
2015-03-27 09:51:18 +01:00
func ( m * Model ) ScanFolderSubs ( folder string , subs [ ] string ) error {
2015-06-20 19:26:25 +02:00
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
// Folders are added to folderRunners only when they are started. We can't
// scan them before they have started, so that's what we need to check for
// here.
if ! ok {
return errors . New ( "no such folder" )
}
return runner . Scan ( subs )
}
// internalScanFolderSubs performs the actual scan of the given folder,
// optionally restricted to the given subdirectories. It walks the disk,
// batches detected changes into the database, and afterwards sweeps the
// database to mark files that have disappeared or become ignored. Folder
// health is re-checked between batches so a vanished folder stops the scan.
func (m *Model) internalScanFolderSubs(folder string, subs []string) error {
	// Normalize the sub paths and reject anything that would escape the
	// folder root.
	for i, sub := range subs {
		sub = osutil.NativeFilename(sub)
		if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
			return errors.New("invalid subpath")
		}
		subs[i] = sub
	}

	m.fmut.Lock()
	fs := m.folderFiles[folder]
	folderCfg := m.folderCfgs[folder]
	ignores := m.folderIgnores[folder]
	runner, ok := m.folderRunners[folder]
	m.fmut.Unlock()

	// Folders are added to folderRunners only when they are started. We can't
	// scan them before they have started, so that's what we need to check for
	// here.
	if !ok {
		return errors.New("no such folder")
	}

	if err := m.CheckFolderHealth(folder); err != nil {
		return err
	}

	_ = ignores.Load(filepath.Join(folderCfg.Path(), ".stignore")) // Ignore error, there might not be an .stignore

	// Required to make sure that we start indexing at a directory we're already
	// aware off.
	var unifySubs []string
nextSub:
	for _, sub := range subs {
		// Walk up towards the root until we find an ancestor the database
		// knows about; "" means the folder root itself.
		for sub != "" {
			parent := filepath.Dir(sub)
			if parent == "." || parent == string(filepath.Separator) {
				parent = ""
			}
			if _, ok = fs.Get(protocol.LocalDeviceID, parent); ok {
				break
			}
			sub = parent
		}
		// Drop subs already covered by an earlier entry.
		for _, us := range unifySubs {
			if strings.HasPrefix(sub, us) {
				continue nextSub
			}
		}
		unifySubs = append(unifySubs, sub)
	}
	subs = unifySubs

	w := &scanner.Walker{
		Folder:                folderCfg.ID,
		Dir:                   folderCfg.Path(),
		Subs:                  subs,
		Matcher:               ignores,
		BlockSize:             protocol.BlockSize,
		TempNamer:             defTempNamer,
		TempLifetime:          time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
		CurrentFiler:          cFiler{m, folder},
		MtimeRepo:             db.NewVirtualMtimeRepo(m.db, folderCfg.ID),
		IgnorePerms:           folderCfg.IgnorePerms,
		AutoNormalize:         folderCfg.AutoNormalize,
		Hashers:               m.numHashers(folder),
		ShortID:               m.shortID,
		ProgressTickIntervalS: folderCfg.ScanProgressIntervalS,
	}

	runner.setState(FolderScanning)

	fchan, err := w.Walk()
	if err != nil {
		// The error we get here is likely an OS level error, which might not be
		// as readable as our health check errors. Check if we can get a health
		// check error first, and use that if it's available.
		if ferr := m.CheckFolderHealth(folder); ferr != nil {
			err = ferr
		}
		runner.setError(err)
		return err
	}

	// Batch database updates by file count and by total block count, to
	// bound memory usage while keeping writes amortized.
	batchSizeFiles := 100
	batchSizeBlocks := 2048 // about 256 MB
	batch := make([]protocol.FileInfo, 0, batchSizeFiles)
	blocksHandled := 0
	for f := range fchan {
		if len(batch) == batchSizeFiles || blocksHandled > batchSizeBlocks {
			if err := m.CheckFolderHealth(folder); err != nil {
				l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
				return err
			}
			m.updateLocals(folder, batch)
			batch = batch[:0]
			blocksHandled = 0
		}
		batch = append(batch, f)
		blocksHandled += len(f.Blocks)
	}
	if err := m.CheckFolderHealth(folder); err != nil {
		l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
		return err
	} else if len(batch) > 0 {
		m.updateLocals(folder, batch)
	}
	batch = batch[:0]

	// Second pass: sweep the database for files that are gone from disk or
	// newly ignored, and record them as deleted/invalid.
	// TODO: We should limit the Have scanning to start at sub
	seenPrefix := false
	var iterError error
	fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		f := fi.(db.FileInfoTruncated)
		hasPrefix := len(subs) == 0
		for _, sub := range subs {
			if strings.HasPrefix(f.Name, sub) {
				hasPrefix = true
				break
			}
		}
		// Return true so that we keep iterating, until we get to the part
		// of the tree we are interested in. Then return false so we stop
		// iterating when we've passed the end of the subtree.
		if !hasPrefix {
			return !seenPrefix
		}

		seenPrefix = true
		if !f.IsDeleted() {
			if f.IsInvalid() {
				return true
			}

			if len(batch) == batchSizeFiles {
				if err := m.CheckFolderHealth(folder); err != nil {
					iterError = err
					return false
				}
				m.updateLocals(folder, batch)
				batch = batch[:0]
			}

			if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
				// File has been ignored or an unsupported symlink. Set invalid bit.
				if debug {
					l.Debugln("setting invalid bit on ignored", f)
				}
				nf := protocol.FileInfo{
					Name:     f.Name,
					Flags:    f.Flags | protocol.FlagInvalid,
					Modified: f.Modified,
					Version:  f.Version, // The file is still the same, so don't bump version
				}
				batch = append(batch, nf)
			} else if _, err := osutil.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
				// File has been deleted.
				// We don't specifically verify that the error is
				// os.IsNotExist because there is a corner case when a
				// directory is suddenly transformed into a file. When that
				// happens, files that were in the directory (that is now a
				// file) are deleted but will return a confusing error ("not a
				// directory") when we try to Lstat() them.
				nf := protocol.FileInfo{
					Name:     f.Name,
					Flags:    f.Flags | protocol.FlagDeleted,
					Modified: f.Modified,
					Version:  f.Version.Update(m.shortID),
				}
				batch = append(batch, nf)
			}
		}
		return true
	})
	if iterError != nil {
		l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, iterError)
		return iterError
	}
	if err := m.CheckFolderHealth(folder); err != nil {
		l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
		return err
	} else if len(batch) > 0 {
		m.updateLocals(folder, batch)
	}

	runner.setState(FolderIdle)
	return nil
}
2015-05-01 14:30:17 +02:00
// DelayScan postpones the folder's next automatic scan by the given duration.
// It is a no-op for folders that are not currently running.
func (m *Model) DelayScan(folder string, next time.Duration) {
	m.fmut.Lock()
	runner, running := m.folderRunners[folder]
	m.fmut.Unlock()

	if !running {
		return
	}
	runner.DelayScan(next)
}
2015-04-29 20:46:32 +02:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
func ( m * Model ) numHashers ( folder string ) int {
m . fmut . Lock ( )
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
m . fmut . Unlock ( )
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 10:05:06 +02:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, lets try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 20:46:32 +02:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
2014-09-28 12:00:38 +01:00
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer device
func (m *Model) clusterConfig(device protocol.DeviceID) protocol.ClusterConfigMessage {
	// Static part: identify ourselves by client name/version and our
	// configured device name.
	cm := protocol.ClusterConfigMessage{
		ClientName:    m.clientName,
		ClientVersion: m.clientVersion,
		Options: []protocol.Option{
			{
				Key:   "name",
				Value: m.deviceName,
			},
		},
	}

	// Announce every folder shared with the peer, together with the full
	// set of devices sharing each of those folders.
	m.fmut.RLock()
	for _, folder := range m.deviceFolders[device] {
		cr := protocol.Folder{
			ID: folder,
		}

		for _, device := range m.folderDevices[folder] {
			// DeviceID is a value type, but with an underlying array. Copy it
			// so we don't grab aliases to the same array later on in device[:]
			device := device
			// TODO: Set read only bit when relevant
			cn := protocol.Device{
				ID:    device[:],
				Flags: protocol.FlagShareTrusted,
			}
			// Flag devices configured as introducers, so the peer knows it
			// may learn about new devices through this one.
			if deviceCfg := m.cfg.Devices()[device]; deviceCfg.Introducer {
				cn.Flags |= protocol.FlagIntroducer
			}
			cr.Devices = append(cr.Devices, cn)
		}

		cm.Folders = append(cm.Folders, cr)
	}
	m.fmut.RUnlock()

	return cm
}
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
func ( m * Model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 21:14:19 +01:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-13 05:12:01 +09:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 21:14:19 +01:00
}
2015-04-13 05:12:01 +09:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 09:58:17 +02:00
}
2014-06-16 10:47:02 +02:00
2014-09-28 12:00:38 +01:00
// Override walks the need list of the given folder and overrides the global
// state with our local state: entries we are missing locally are announced
// as deleted by us, while entries we do have are re-announced with a version
// bumped above the merged global one, so that our copy wins.
func (m *Model) Override(folder string) {
	m.fmut.RLock()
	fs, ok := m.folderFiles[folder]
	runner := m.folderRunners[folder]
	m.fmut.RUnlock()

	if !ok {
		// Unknown folder; nothing to do.
		return
	}

	runner.setState(FolderScanning)
	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	// WithNeed iterates the files where the global version is ahead of our
	// local one.
	fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		need := fi.(protocol.FileInfo)
		if len(batch) == indexBatchSize {
			// Flush a full batch before accumulating more entries.
			m.updateLocals(folder, batch)
			batch = batch[:0]
		}

		have, ok := fs.Get(protocol.LocalDeviceID, need.Name)
		if !ok || have.Name != need.Name {
			// We are missing the file
			need.Flags |= protocol.FlagDeleted
			need.Blocks = nil
			need.Version = need.Version.Update(m.shortID)
		} else {
			// We have the file, replace with our version
			have.Version = have.Version.Merge(need.Version).Update(m.shortID)
			need = have
		}
		// Zero LocalVersion before committing — NOTE(review): presumably so
		// the database assigns a fresh local version on update; confirm.
		need.LocalVersion = 0
		batch = append(batch, need)
		return true
	})
	if len(batch) > 0 {
		m.updateLocals(folder, batch)
	}
	runner.setState(FolderIdle)
}
2014-06-20 00:27:54 +02:00
2014-09-28 12:00:38 +01:00
// CurrentLocalVersion returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder has
2014-09-27 14:44:15 +02:00
// changed.
2015-06-24 08:52:38 +01:00
func ( m * Model ) CurrentLocalVersion ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
2014-10-12 10:36:04 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-09-27 14:44:15 +02:00
}
2015-06-24 08:52:38 +01:00
return fs . LocalVersion ( protocol . LocalDeviceID ) , true
2014-09-27 14:44:15 +02:00
}
2014-09-28 12:00:38 +01:00
// RemoteLocalVersion returns the change version for the given folder, as
2014-09-27 14:44:15 +02:00
// sent by remote peers. This is guaranteed to increment if the contents of
2014-09-28 12:00:38 +01:00
// the remote or global folder has changed.
2015-06-24 08:52:38 +01:00
func ( m * Model ) RemoteLocalVersion ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-07-15 17:54:00 +02:00
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-07-15 17:54:00 +02:00
if ! ok {
2014-10-24 14:54:36 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-07-15 17:54:00 +02:00
}
2015-01-18 02:12:06 +01:00
var ver int64
2014-09-28 12:00:38 +01:00
for _ , n := range m . folderDevices [ folder ] {
2014-07-15 17:54:00 +02:00
ver += fs . LocalVersion ( n )
2014-06-20 00:27:54 +02:00
}
2015-06-24 08:52:38 +01:00
return ver , true
2014-06-20 00:27:54 +02:00
}
2014-09-27 14:44:15 +02:00
2015-02-07 10:52:42 +00:00
// GlobalDirectoryTree returns a nested map describing the global tree below
// the given prefix in the given folder. Directories map to child maps; when
// dirsonly is false, files map to a two element list of [modified time,
// size]. levels limits the tree depth (negative means unlimited). Returns
// nil for an unknown folder.
func (m *Model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return nil
	}

	output := make(map[string]interface{})
	sep := string(filepath.Separator)
	prefix = osutil.NativeFilename(prefix)

	// Normalize the prefix to end in a separator so it can be stripped from
	// the returned names below.
	if prefix != "" && !strings.HasSuffix(prefix, sep) {
		prefix = prefix + sep
	}

	files.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
		f := fi.(db.FileInfoTruncated)

		// Skip invalid and deleted entries, and the prefix entry itself.
		if f.IsInvalid() || f.IsDeleted() || f.Name == prefix {
			return true
		}

		f.Name = strings.Replace(f.Name, prefix, "", 1)

		var dir, base string
		if f.IsDirectory() && !f.IsSymlink() {
			dir = f.Name
		} else {
			dir = filepath.Dir(f.Name)
			base = filepath.Base(f.Name)
		}

		// Enforce the depth limit.
		if levels > -1 && strings.Count(f.Name, sep) > levels {
			return true
		}

		// Descend (creating intermediate maps as needed) to the entry's
		// parent directory node.
		last := output
		if dir != "." {
			for _, path := range strings.Split(dir, sep) {
				directory, ok := last[path]
				if !ok {
					newdir := make(map[string]interface{})
					last[path] = newdir
					last = newdir
				} else {
					last = directory.(map[string]interface{})
				}
			}
		}

		if !dirsonly && base != "" {
			last[base] = []interface{}{
				time.Unix(f.Modified, 0), f.Size(),
			}
		}

		return true
	})

	return output
}
2015-03-17 17:52:50 +00:00
func ( m * Model ) Availability ( folder , file string ) [ ] protocol . DeviceID {
2014-10-31 23:41:18 +00:00
// Acquire this lock first, as the value returned from foldersFiles can
2014-12-28 23:11:32 +00:00
// get heavily modified on Close()
2014-10-31 23:41:18 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
return nil
}
2014-10-31 23:41:18 +00:00
availableDevices := [ ] protocol . DeviceID { }
for _ , device := range fs . Availability ( file ) {
2015-06-28 16:05:29 +01:00
_ , ok := m . conn [ device ]
2014-10-31 23:41:18 +00:00
if ok {
availableDevices = append ( availableDevices , device )
}
}
return availableDevices
2014-09-27 14:44:15 +02:00
}
2015-04-28 22:32:10 +02:00
// BringToFront bumps the given files priority in the job queue.
2014-12-30 09:35:21 +01:00
func ( m * Model ) BringToFront ( folder , file string ) {
2014-12-01 19:23:06 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
runner , ok := m . folderRunners [ folder ]
if ok {
2014-12-30 09:35:21 +01:00
runner . BringToFront ( file )
2014-12-01 19:23:06 +00:00
}
}
2015-04-28 22:32:10 +02:00
// CheckFolderHealth checks the folder for common errors and returns the
// current folder error, or nil if the folder is healthy.
func (m *Model) CheckFolderHealth(id string) error {
	// First check the disk holding our own config directory; if it is too
	// full we refuse to proceed regardless of the folder's own state.
	if minFree := m.cfg.Options().MinHomeDiskFreePct; minFree > 0 {
		if free, err := osutil.DiskFreePercentage(m.cfg.ConfigPath()); err == nil && free < minFree {
			return errors.New("home disk is out of space")
		}
	}

	folder, ok := m.cfg.Folders()[id]
	if !ok {
		return errors.New("folder does not exist")
	}

	// Stat now; err is examined differently depending on whether the index
	// already contains files for this folder.
	fi, err := os.Stat(folder.Path())
	if v, ok := m.CurrentLocalVersion(id); ok && v > 0 {
		// Safety check. If the cached index contains files but the
		// folder doesn't exist, we have a problem. We would assume
		// that all files have been deleted which might not be the case,
		// so mark it as invalid instead.
		if err != nil || !fi.IsDir() {
			err = errors.New("folder path missing")
		} else if !folder.HasMarker() {
			err = errors.New("folder marker missing")
		} else if free, errDfp := osutil.DiskFreePercentage(folder.Path()); errDfp == nil && free < folder.MinDiskFreePct {
			err = errors.New("out of disk space")
		}
	} else if os.IsNotExist(err) {
		// If we don't have any files in the index, and the directory
		// doesn't exist, try creating it.
		err = osutil.MkdirAll(folder.Path(), 0700)
		if err == nil {
			err = folder.CreateMarker()
		}
	} else if !folder.HasMarker() {
		// If we don't have any files in the index, and the path does exist
		// but the marker is not there, create it.
		err = folder.CreateMarker()
	}

	// Compare the new health state to the runner's previous error, log any
	// transition, and update the runner accordingly.
	m.fmut.RLock()
	runner, runnerExists := m.folderRunners[folder.ID]
	m.fmut.RUnlock()

	var oldErr error
	if runnerExists {
		_, _, oldErr = runner.getState()
	}

	if err != nil {
		if oldErr != nil && oldErr.Error() != err.Error() {
			// Error changed from one thing to another.
			l.Infof("Folder %q error changed: %q -> %q", folder.ID, oldErr, err)
		} else if oldErr == nil {
			// Folder was healthy, now it is not.
			l.Warnf("Stopping folder %q - %v", folder.ID, err)
		}
		if runnerExists {
			runner.setError(err)
		}
	} else if oldErr != nil {
		// Folder was in error, now it is healthy again.
		l.Infof("Folder %q error is cleared, restarting", folder.ID)
		if runnerExists {
			runner.clearError()
		}
	}

	return err
}
2015-06-21 09:35:41 +02:00
// ResetFolder drops all stored index data for the given folder from the
// database.
func (m *Model) ResetFolder(folder string) {
	l.Infof("Cleaning data for folder %q", folder)
	db.DropFolder(m.db, folder)
}
2014-09-27 14:44:15 +02:00
// String returns a short identifier for this model instance, used in log
// output.
func (m *Model) String() string {
	return fmt.Sprintf("model@%p", m)
}
2014-10-13 14:43:01 +02:00
2015-06-03 09:47:39 +02:00
// VerifyConfiguration accepts any configuration change; the model never
// vetoes a new configuration, so this always returns nil.
func (m *Model) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}
func ( m * Model ) CommitConfiguration ( from , to config . Configuration ) bool {
// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.
2015-07-22 09:02:55 +02:00
// Go through the folder configs and figure out if we need to restart or not.
fromFolders := mapFolders ( from . Folders )
toFolders := mapFolders ( to . Folders )
2015-07-23 16:13:53 +02:00
for folderID , cfg := range toFolders {
2015-07-22 09:02:55 +02:00
if _ , ok := fromFolders [ folderID ] ; ! ok {
2015-07-23 16:13:53 +02:00
// A folder was added.
2015-07-22 09:02:55 +02:00
if debug {
2015-07-23 16:13:53 +02:00
l . Debugln ( m , "adding folder" , folderID )
}
m . AddFolder ( cfg )
if cfg . ReadOnly {
m . StartFolderRO ( folderID )
} else {
m . StartFolderRW ( folderID )
}
// Drop connections to all devices that can now share the new
// folder.
m . pmut . Lock ( )
for _ , dev := range cfg . DeviceIDs ( ) {
2015-06-28 16:05:29 +01:00
if conn , ok := m . conn [ dev ] ; ok {
2015-07-23 16:13:53 +02:00
closeRawConn ( conn )
}
2015-07-22 09:02:55 +02:00
}
2015-07-23 16:13:53 +02:00
m . pmut . Unlock ( )
2015-07-22 09:02:55 +02:00
}
2015-06-03 09:47:39 +02:00
}
2015-07-22 09:02:55 +02:00
for folderID , fromCfg := range fromFolders {
toCfg , ok := toFolders [ folderID ]
if ! ok {
// A folder was removed. Requires restart.
if debug {
l . Debugln ( m , "requires restart, removing folder" , folderID )
}
return false
}
// This folder exists on both sides. Compare the device lists, as we
// can handle adding a device (but not currently removing one).
fromDevs := mapDevices ( fromCfg . DeviceIDs ( ) )
toDevs := mapDevices ( toCfg . DeviceIDs ( ) )
for dev := range fromDevs {
if _ , ok := toDevs [ dev ] ; ! ok {
// A device was removed. Requires restart.
if debug {
l . Debugln ( m , "requires restart, removing device" , dev , "from folder" , folderID )
}
return false
}
}
for dev := range toDevs {
if _ , ok := fromDevs [ dev ] ; ! ok {
// A device was added. Handle it!
m . fmut . Lock ( )
m . pmut . Lock ( )
m . folderCfgs [ folderID ] = toCfg
m . folderDevices [ folderID ] = append ( m . folderDevices [ folderID ] , dev )
m . deviceFolders [ dev ] = append ( m . deviceFolders [ dev ] , folderID )
// If we already have a connection to this device, we should
// disconnect it so that we start sharing the folder with it.
// We close the underlying connection and let the normal error
// handling kick in to clean up and reconnect.
2015-06-28 16:05:29 +01:00
if conn , ok := m . conn [ dev ] ; ok {
2015-07-22 09:02:55 +02:00
closeRawConn ( conn )
}
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
}
// Check if anything else differs, apart from the device list.
fromCfg . Devices = nil
toCfg . Devices = nil
if ! reflect . DeepEqual ( fromCfg , toCfg ) {
if debug {
l . Debugln ( m , "requires restart, folder" , folderID , "configuration differs" )
}
return false
}
2015-06-03 09:47:39 +02:00
}
2015-07-22 09:02:55 +02:00
// Removing a device requres restart
toDevs := mapDeviceCfgs ( from . Devices )
2015-06-03 09:47:39 +02:00
for _ , dev := range from . Devices {
if _ , ok := toDevs [ dev . DeviceID ] ; ! ok {
2015-07-22 09:02:55 +02:00
if debug {
l . Debugln ( m , "requires restart, device" , dev . DeviceID , "was removed" )
}
2015-06-03 09:47:39 +02:00
return false
}
}
// All of the generic options require restart
if ! reflect . DeepEqual ( from . Options , to . Options ) {
2015-07-22 09:02:55 +02:00
if debug {
l . Debugln ( m , "requires restart, options differ" )
}
2015-06-03 09:47:39 +02:00
return false
}
return true
}
2015-07-22 09:02:55 +02:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
	byID := make(map[string]config.FolderConfiguration, len(folders))
	for i := range folders {
		byID[folders[i].ID] = folders[i]
	}
	return byID
}
// mapDevices returns a set (map to empty struct) of the given device IDs,
// for fast membership tests.
func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for i := range devices {
		set[devices[i]] = struct{}{}
	}
	return set
}
// mapDeviceCfgs returns a set (map to empty struct) of the device IDs found
// in the given device configurations, for fast membership tests.
func mapDeviceCfgs(devices []config.DeviceConfiguration) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for i := range devices {
		set[devices[i].DeviceID] = struct{}{}
	}
	return set
}
2015-07-21 13:14:33 +02:00
// filterIndex removes entries we will not accept from the given index
// update: files with unknown flag bits, deletes when dropDeletes is set,
// and unsupported symlinks. Removal swaps in the last element, so the
// relative order of the surviving entries is not preserved.
func filterIndex(folder string, fs []protocol.FileInfo, dropDeletes bool) []protocol.FileInfo {
	i := 0
	for i < len(fs) {
		f := fs[i]

		// The checks are evaluated in order; symlinkInvalid has a logging
		// side effect and must stay last.
		drop := true
		switch {
		case f.Flags&^protocol.FlagsAll != 0:
			if debug {
				l.Debugln("dropping update for file with unknown bits set", f)
			}
		case f.IsDeleted() && dropDeletes:
			if debug {
				l.Debugln("dropping update for undesired delete", f)
			}
		case symlinkInvalid(folder, f):
			if debug {
				l.Debugln("dropping update for unsupported symlink", f)
			}
		default:
			drop = false
		}

		if drop {
			// Swap-with-last removal; do not advance i, as a new element
			// now occupies this slot.
			fs[i] = fs[len(fs)-1]
			fs = fs[:len(fs)-1]
		} else {
			i++
		}
	}
	return fs
}
2015-06-15 00:44:24 +02:00
func symlinkInvalid ( folder string , fi db . FileIntf ) bool {
if ! symlinks . Supported && fi . IsSymlink ( ) && ! fi . IsInvalid ( ) && ! fi . IsDeleted ( ) {
symlinkWarning . Do ( func ( ) {
2015-04-28 18:34:55 +03:00
l . Warnln ( "Symlinks are disabled, unsupported or require Administrator privileges. This might cause your folder to appear out of sync." )
2014-11-09 04:26:52 +00:00
} )
2015-06-15 00:44:24 +02:00
// Need to type switch for the concrete type to be able to access fields...
var name string
switch fi := fi . ( type ) {
case protocol . FileInfo :
name = fi . Name
case db . FileInfoTruncated :
name = fi . Name
}
l . Infoln ( "Unsupported symlink" , name , "in folder" , folder )
2014-11-09 04:26:52 +00:00
return true
}
return false
}
2015-04-25 22:53:44 +01:00
// Skips `skip` elements and retrieves up to `get` elements from a given slice.
// Returns the resulting slice, plus how many elements are left to skip or
// copy to satisfy the values which were provided, given the slice is not
// big enough.
func getChunk(data []string, skip, get int) ([]string, int, int) {
	n := len(data)
	switch {
	case n <= skip:
		// Nothing survives the skip; report how much skipping remains.
		return []string{}, skip - n, get
	case n < skip+get:
		// The tail is shorter than requested; report the shortfall.
		return data[skip:n], 0, get - (n - skip)
	default:
		return data[skip : skip+get], 0, 0
	}
}
2015-07-22 09:02:55 +02:00
func closeRawConn ( conn io . Closer ) error {
if conn , ok := conn . ( * tls . Conn ) ; ok {
// If the underlying connection is a *tls.Conn, Close() does more
// than it says on the tin. Specifically, it sends a TLS alert
// message, which might block forever if the connection is dead
// and we don't have a deadline set.
conn . SetWriteDeadline ( time . Now ( ) . Add ( 250 * time . Millisecond ) )
}
return conn . Close ( )
}