// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package model
2013-12-15 11:43:31 +01:00
import (
2014-09-14 23:03:53 +01:00
"bufio"
2014-09-10 08:48:15 +02:00
"crypto/tls"
2015-03-10 23:45:43 +01:00
"encoding/json"
2014-01-06 21:31:36 +01:00
"errors"
2013-12-23 12:12:44 -05:00
"fmt"
2013-12-31 21:22:49 -05:00
"io"
2014-01-05 23:54:57 +01:00
"net"
2013-12-15 11:43:31 +01:00
"os"
2014-03-28 14:36:57 +01:00
"path/filepath"
2015-06-03 09:47:39 +02:00
"reflect"
2015-04-29 20:46:32 +02:00
"runtime"
2016-03-18 08:28:44 +00:00
"sort"
2014-08-11 20:20:01 +02:00
"strings"
2015-04-22 23:54:31 +01:00
stdsync "sync"
2013-12-15 11:43:31 +01:00
"time"
2014-06-21 09:43:12 +02:00
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/config"
2016-05-04 19:38:12 +00:00
"github.com/syncthing/syncthing/lib/connections"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
2015-09-22 19:38:46 +02:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 11:29:25 +02:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/symlinks"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/versioner"
2015-06-12 13:04:00 +02:00
"github.com/thejerf/suture"
2013-12-15 11:43:31 +01:00
)
2014-07-15 13:04:37 +02:00
// How many files to send in each Index/IndexUpdate message. We cap both
// the estimated serialized size of a message and the number of files in it.
const (
	indexTargetSize   = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	indexPerFileSize  = 250        // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
	indexPerBlockSize = 40         // Each BlockInfo is approximately this big
	indexBatchSize    = 1000       // Either way, don't include more files than this
)
2014-07-15 13:04:37 +02:00
2014-09-30 17:52:05 +02:00
type service interface {
Serve ( )
Stop ( )
2014-12-30 09:31:34 +01:00
Jobs ( ) ( [ ] string , [ ] string ) // In progress, Queued
2014-12-30 09:35:21 +01:00
BringToFront ( string )
2015-05-01 14:30:17 +02:00
DelayScan ( d time . Duration )
2015-05-07 22:45:07 +02:00
IndexUpdated ( ) // Remote index was updated notification
2015-06-20 19:26:25 +02:00
Scan ( subs [ ] string ) error
2015-03-16 21:14:19 +01:00
2015-04-13 05:12:01 +09:00
setState ( state folderState )
setError ( err error )
2015-06-13 19:10:11 +01:00
clearError ( )
2015-04-13 05:12:01 +09:00
getState ( ) ( folderState , time . Time , error )
2014-09-30 17:52:05 +02:00
}
2016-04-15 10:59:41 +00:00
// Availability identifies a device a block or file can be fetched from, and
// whether the data would come from a temporary (partially downloaded) file.
type Availability struct {
	ID            protocol.DeviceID `json:"id"`
	FromTemporary bool              `json:"fromTemporary"`
}
2013-12-15 11:43:31 +01:00
type Model struct {
2015-06-12 13:04:00 +02:00
* suture . Supervisor
2015-07-23 16:13:53 +02:00
cfg * config . Wrapper
2015-10-31 12:31:25 +01:00
db * db . Instance
2015-07-23 16:13:53 +02:00
finder * db . BlockFinder
progressEmitter * ProgressEmitter
id protocol . DeviceID
2016-01-20 11:10:22 -08:00
shortID protocol . ShortID
2015-07-23 16:13:53 +02:00
cacheIgnoredFiles bool
2015-10-18 20:13:58 -04:00
protectedFiles [ ] string
2014-05-15 00:26:55 -03:00
2014-09-28 12:05:25 +01:00
deviceName string
2014-05-15 00:26:55 -03:00
clientName string
clientVersion string
2015-11-13 13:30:52 +01:00
folderCfgs map [ string ] config . FolderConfiguration // folder -> cfg
folderFiles map [ string ] * db . FileSet // folder -> files
folderDevices map [ string ] [ ] protocol . DeviceID // folder -> deviceIDs
deviceFolders map [ protocol . DeviceID ] [ ] string // deviceID -> folders
deviceStatRefs map [ protocol . DeviceID ] * stats . DeviceStatisticsReference // deviceID -> statsRef
folderIgnores map [ string ] * ignore . Matcher // folder -> matcher object
folderRunners map [ string ] service // folder -> puller or scanner
folderRunnerTokens map [ string ] [ ] suture . ServiceToken // folder -> tokens for puller or scanner
folderStatRefs map [ string ] * stats . FolderStatisticsReference // folder -> statsRef
fmut sync . RWMutex // protects the above
2014-03-29 18:53:48 +01:00
2016-05-04 19:38:12 +00:00
conn map [ protocol . DeviceID ] connections . Connection
2016-04-15 10:59:41 +00:00
helloMessages map [ protocol . DeviceID ] protocol . HelloMessage
deviceClusterConf map [ protocol . DeviceID ] protocol . ClusterConfigMessage
devicePaused map [ protocol . DeviceID ] bool
deviceDownloads map [ protocol . DeviceID ] * deviceDownloadState
pmut sync . RWMutex // protects the above
2013-12-15 11:43:31 +01:00
}
2016-05-04 10:47:33 +00:00
type folderFactory func ( * Model , config . FolderConfiguration , versioner . Versioner ) service
2014-01-07 22:44:21 +01:00
var (
2016-05-04 10:47:33 +00:00
symlinkWarning = stdsync . Once { }
2016-05-04 11:26:36 +00:00
folderFactories = make ( map [ config . FolderType ] folderFactory , 0 )
2014-01-07 22:44:21 +01:00
)
2014-01-06 21:31:36 +01:00
2014-01-06 11:11:18 +01:00
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
2014-09-28 12:00:38 +01:00
// for file data without altering the local folder in any way.
2015-10-31 12:31:25 +01:00
func NewModel ( cfg * config . Wrapper , id protocol . DeviceID , deviceName , clientName , clientVersion string , ldb * db . Instance , protectedFiles [ ] string ) * Model {
2013-12-15 11:43:31 +01:00
m := & Model {
2015-07-11 11:12:20 +10:00
Supervisor : suture . New ( "model" , suture . Spec {
Log : func ( line string ) {
2015-10-03 17:25:21 +02:00
l . Debugln ( line )
2015-07-11 11:12:20 +10:00
} ,
} ) ,
2015-11-13 13:30:52 +01:00
cfg : cfg ,
db : ldb ,
finder : db . NewBlockFinder ( ldb ) ,
progressEmitter : NewProgressEmitter ( cfg ) ,
id : id ,
shortID : id . Short ( ) ,
cacheIgnoredFiles : cfg . Options ( ) . CacheIgnoredFiles ,
protectedFiles : protectedFiles ,
deviceName : deviceName ,
clientName : clientName ,
clientVersion : clientVersion ,
folderCfgs : make ( map [ string ] config . FolderConfiguration ) ,
folderFiles : make ( map [ string ] * db . FileSet ) ,
folderDevices : make ( map [ string ] [ ] protocol . DeviceID ) ,
deviceFolders : make ( map [ protocol . DeviceID ] [ ] string ) ,
deviceStatRefs : make ( map [ protocol . DeviceID ] * stats . DeviceStatisticsReference ) ,
folderIgnores : make ( map [ string ] * ignore . Matcher ) ,
folderRunners : make ( map [ string ] service ) ,
folderRunnerTokens : make ( map [ string ] [ ] suture . ServiceToken ) ,
folderStatRefs : make ( map [ string ] * stats . FolderStatisticsReference ) ,
2016-05-04 19:38:12 +00:00
conn : make ( map [ protocol . DeviceID ] connections . Connection ) ,
2016-03-25 20:29:07 +00:00
helloMessages : make ( map [ protocol . DeviceID ] protocol . HelloMessage ) ,
2016-04-15 10:59:41 +00:00
deviceClusterConf : make ( map [ protocol . DeviceID ] protocol . ClusterConfigMessage ) ,
2015-11-13 13:30:52 +01:00
devicePaused : make ( map [ protocol . DeviceID ] bool ) ,
2016-04-15 10:59:41 +00:00
deviceDownloads : make ( map [ protocol . DeviceID ] * deviceDownloadState ) ,
fmut : sync . NewRWMutex ( ) ,
pmut : sync . NewRWMutex ( ) ,
2013-12-15 11:43:31 +01:00
}
2014-11-25 22:07:18 +00:00
if cfg . Options ( ) . ProgressUpdateIntervalS > - 1 {
go m . progressEmitter . Serve ( )
}
2013-12-15 11:43:31 +01:00
return m
}
2015-04-28 22:32:10 +02:00
// StartDeadlockDetector starts a deadlock detector on the models locks which
// causes panics in case the locks cannot be acquired in the given timeout
// period.
2015-04-08 13:35:03 +01:00
func ( m * Model ) StartDeadlockDetector ( timeout time . Duration ) {
l . Infof ( "Starting deadlock detector with %v timeout" , timeout )
2015-04-22 23:54:31 +01:00
deadlockDetect ( m . fmut , timeout )
deadlockDetect ( m . pmut , timeout )
2015-04-08 13:35:03 +01:00
}
2016-05-04 10:47:33 +00:00
// StartFolder constrcuts the folder service and starts it.
func ( m * Model ) StartFolder ( folder string ) {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
2014-09-28 12:00:38 +01:00
cfg , ok := m . folderCfgs [ folder ]
2014-09-27 14:44:15 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
panic ( "cannot start nonexistent folder " + folder )
2014-09-27 14:44:15 +02:00
}
2014-09-30 17:52:05 +02:00
_ , ok = m . folderRunners [ folder ]
if ok {
panic ( "cannot start already running folder " + folder )
}
2014-09-27 14:44:15 +02:00
2016-05-04 10:47:33 +00:00
folderFactory , ok := folderFactories [ cfg . Type ]
if ! ok {
2016-05-04 11:26:36 +00:00
panic ( fmt . Sprintf ( "unknown folder type 0x%x" , cfg . Type ) )
2016-05-04 10:47:33 +00:00
}
var ver versioner . Versioner
2014-09-27 14:44:15 +02:00
if len ( cfg . Versioning . Type ) > 0 {
2016-05-04 10:47:33 +00:00
versionerFactory , ok := versioner . Factories [ cfg . Versioning . Type ]
2014-09-27 14:44:15 +02:00
if ! ok {
l . Fatalf ( "Requested versioning type %q that does not exist" , cfg . Versioning . Type )
}
2015-06-20 20:04:47 +02:00
2016-05-04 10:47:33 +00:00
ver = versionerFactory ( folder , cfg . Path ( ) , cfg . Versioning . Params )
if service , ok := ver . ( suture . Service ) ; ok {
2015-06-12 13:04:00 +02:00
// The versioner implements the suture.Service interface, so
// expects to be run in the background in addition to being called
// when files are going to be archived.
2015-11-13 13:30:52 +01:00
token := m . Add ( service )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
2015-06-12 13:04:00 +02:00
}
2014-03-29 18:53:48 +01:00
}
2014-09-27 14:44:15 +02:00
2016-05-04 10:47:33 +00:00
p := folderFactory ( m , cfg , ver )
m . folderRunners [ folder ] = p
2015-10-18 20:13:58 -04:00
m . warnAboutOverwritingProtectedFiles ( folder )
2015-11-13 13:30:52 +01:00
token := m . Add ( p )
m . folderRunnerTokens [ folder ] = append ( m . folderRunnerTokens [ folder ] , token )
m . fmut . Unlock ( )
2015-07-23 16:13:53 +02:00
2016-05-04 10:47:33 +00:00
l . Infoln ( "Ready to synchronize" , folder , fmt . Sprintf ( "(%s)" , cfg . Type ) )
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
2015-10-18 20:13:58 -04:00
func ( m * Model ) warnAboutOverwritingProtectedFiles ( folder string ) {
2016-05-04 10:47:33 +00:00
if m . folderCfgs [ folder ] . Type == config . FolderTypeReadOnly {
2015-10-18 20:13:58 -04:00
return
}
folderLocation := m . folderCfgs [ folder ] . Path ( )
ignores := m . folderIgnores [ folder ]
var filesAtRisk [ ] string
for _ , protectedFilePath := range m . protectedFiles {
// check if file is synced in this folder
if ! strings . HasPrefix ( protectedFilePath , folderLocation ) {
continue
}
// check if file is ignored
2016-04-07 09:34:07 +00:00
if ignores . Match ( protectedFilePath ) . IsIgnored ( ) {
2015-10-18 20:13:58 -04:00
continue
}
filesAtRisk = append ( filesAtRisk , protectedFilePath )
}
if len ( filesAtRisk ) > 0 {
l . Warnln ( "Some protected files may be overwritten and cause issues. See http://docs.syncthing.net/users/config.html#syncing-configuration-files for more information. The at risk files are:" , strings . Join ( filesAtRisk , ", " ) )
}
}
2015-11-13 13:30:52 +01:00
// RemoveFolder stops the services running for the given folder, closes
// connections to the devices sharing it, removes it from all bookkeeping maps
// and drops its data from the database.
func (m *Model) RemoveFolder(folder string) {
	m.fmut.Lock()
	m.pmut.Lock()
	defer m.fmut.Unlock()
	defer m.pmut.Unlock()

	// Stop the services running for this folder
	for _, id := range m.folderRunnerTokens[folder] {
		m.Remove(id)
	}

	// Close connections to affected devices
	for _, dev := range m.folderDevices[folder] {
		if conn, ok := m.conn[dev]; ok {
			closeRawConn(conn)
		}
	}

	// Clean up our config maps
	delete(m.folderCfgs, folder)
	delete(m.folderFiles, folder)
	delete(m.folderDevices, folder)
	delete(m.folderIgnores, folder)
	delete(m.folderRunners, folder)
	delete(m.folderRunnerTokens, folder)
	delete(m.folderStatRefs, folder)
	for dev, folders := range m.deviceFolders {
		m.deviceFolders[dev] = stringSliceWithout(folders, folder)
	}

	// Remove it from the database
	db.DropFolder(m.db, folder)
}
2014-01-05 23:54:57 +01:00
type ConnectionInfo struct {
protocol . Statistics
2015-08-23 21:56:10 +02:00
Connected bool
Paused bool
2014-01-23 13:12:45 +01:00
Address string
ClientVersion string
2016-05-04 19:38:12 +00:00
Type string
2014-01-05 23:54:57 +01:00
}
2015-03-10 23:45:43 +01:00
func ( info ConnectionInfo ) MarshalJSON ( ) ( [ ] byte , error ) {
return json . Marshal ( map [ string ] interface { } {
"at" : info . At ,
"inBytesTotal" : info . InBytesTotal ,
"outBytesTotal" : info . OutBytesTotal ,
2015-08-23 21:56:10 +02:00
"connected" : info . Connected ,
"paused" : info . Paused ,
2015-03-10 23:45:43 +01:00
"address" : info . Address ,
"clientVersion" : info . ClientVersion ,
2016-05-04 19:38:12 +00:00
"type" : info . Type ,
2015-03-10 23:45:43 +01:00
} )
}
2015-11-09 23:48:58 +01:00
// ConnectionStats returns a map with connection statistics for each device.
2015-04-07 13:20:40 +01:00
func ( m * Model ) ConnectionStats ( ) map [ string ] interface { } {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-01-05 16:16:37 +01:00
2015-08-23 21:56:10 +02:00
res := make ( map [ string ] interface { } )
devs := m . cfg . Devices ( )
conns := make ( map [ string ] ConnectionInfo , len ( devs ) )
for device := range devs {
2016-03-25 20:29:07 +00:00
hello := m . helloMessages [ device ]
versionString := hello . ClientVersion
if hello . ClientName != "syncthing" {
versionString = hello . ClientName + " " + hello . ClientVersion
}
2014-01-05 23:54:57 +01:00
ci := ConnectionInfo {
2016-05-04 19:38:12 +00:00
ClientVersion : strings . TrimSpace ( versionString ) ,
2015-08-23 21:56:10 +02:00
Paused : m . devicePaused [ device ] ,
2014-01-05 23:54:57 +01:00
}
2015-08-23 21:56:10 +02:00
if conn , ok := m . conn [ device ] ; ok {
2015-07-17 21:22:07 +01:00
ci . Type = conn . Type
2015-08-23 21:56:10 +02:00
ci . Connected = ok
ci . Statistics = conn . Statistics ( )
if addr := conn . RemoteAddr ( ) ; addr != nil {
ci . Address = addr . String ( )
}
2014-01-05 23:54:57 +01:00
}
2014-02-13 12:41:37 +01:00
2015-04-07 13:20:40 +01:00
conns [ device . String ( ) ] = ci
2013-12-30 09:30:29 -05:00
}
2014-01-17 20:06:44 -07:00
2015-04-07 13:20:40 +01:00
res [ "connections" ] = conns
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-03-28 14:36:57 +01:00
2014-05-24 21:34:11 +02:00
in , out := protocol . TotalInOut ( )
res [ "total" ] = ConnectionInfo {
Statistics : protocol . Statistics {
At : time . Now ( ) ,
2014-06-01 21:56:05 +02:00
InBytesTotal : in ,
OutBytesTotal : out ,
2014-05-24 21:34:11 +02:00
} ,
}
2014-01-05 16:16:37 +01:00
return res
2013-12-30 09:30:29 -05:00
}
2015-04-28 22:32:10 +02:00
// DeviceStatistics returns statistics about each device
2014-09-28 12:00:38 +01:00
func ( m * Model ) DeviceStatistics ( ) map [ string ] stats . DeviceStatistics {
var res = make ( map [ string ] stats . DeviceStatistics )
2014-10-06 09:25:45 +02:00
for id := range m . cfg . Devices ( ) {
res [ id . String ( ) ] = m . deviceStatRef ( id ) . GetStatistics ( )
2014-08-21 23:45:40 +01:00
}
return res
}
2015-04-28 22:32:10 +02:00
// FolderStatistics returns statistics about each folder
2014-12-07 20:21:12 +00:00
func ( m * Model ) FolderStatistics ( ) map [ string ] stats . FolderStatistics {
var res = make ( map [ string ] stats . FolderStatistics )
for id := range m . cfg . Folders ( ) {
res [ id ] = m . folderStatRef ( id ) . GetStatistics ( )
}
return res
}
2015-04-28 22:32:10 +02:00
// Completion returns the completion status, in percent, for the given device
// and folder.
2014-09-28 12:00:38 +01:00
func ( m * Model ) Completion ( device protocol . DeviceID , folder string ) float64 {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
rf , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-08-05 20:16:25 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
return 0 // Folder doesn't exist, so we hardly have any of it
2014-08-05 20:16:25 +02:00
}
2015-10-21 09:10:26 +02:00
_ , _ , tot := rf . GlobalSize ( )
2014-08-05 20:16:25 +02:00
if tot == 0 {
2014-09-28 12:00:38 +01:00
return 100 // Folder is empty, so we have all of it
2014-08-05 20:16:25 +02:00
}
2016-05-26 06:53:27 +00:00
m . pmut . RLock ( )
counts := m . deviceDownloads [ device ] . GetBlockCounts ( folder )
m . pmut . RUnlock ( )
var need , fileNeed , downloaded int64
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( device , func ( f db . FileIntf ) bool {
2016-05-26 06:53:27 +00:00
ft := f . ( db . FileInfoTruncated )
// This might might be more than it really is, because some blocks can be of a smaller size.
downloaded = int64 ( counts [ ft . Name ] * protocol . BlockSize )
fileNeed = ft . Size ( ) - downloaded
if fileNeed < 0 {
fileNeed = 0
}
need += fileNeed
2014-07-29 11:06:52 +02:00
return true
} )
2015-10-21 09:10:26 +02:00
needRatio := float64 ( need ) / float64 ( tot )
completionPct := 100 * ( 1 - needRatio )
l . Debugf ( "%v Completion(%s, %q): %f (%d / %d = %f)" , m , device , folder , completionPct , need , tot , needRatio )
2014-08-12 13:53:31 +02:00
2015-10-21 09:10:26 +02:00
return completionPct
2014-07-29 11:06:52 +02:00
}
2015-01-12 14:50:30 +01:00
func sizeOfFile ( f db . FileIntf ) ( files , deleted int , bytes int64 ) {
2014-08-12 13:53:31 +02:00
if ! f . IsDeleted ( ) {
2014-07-06 14:46:48 +02:00
files ++
} else {
deleted ++
2013-12-30 09:30:29 -05:00
}
2014-08-12 13:53:31 +02:00
bytes += f . Size ( )
2014-01-05 16:16:37 +01:00
return
}
2013-12-30 09:30:29 -05:00
2014-03-28 14:36:57 +01:00
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
2015-01-09 08:18:42 +01:00
func ( m * Model ) GlobalSize ( folder string ) ( nfiles , deleted int , bytes int64 ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2015-10-21 09:10:26 +02:00
nfiles , deleted , bytes = rf . GlobalSize ( )
2014-03-29 18:53:48 +01:00
}
2014-07-06 14:46:48 +02:00
return
2014-03-28 14:36:57 +01:00
}
2014-01-06 11:11:18 +01:00
// LocalSize returns the number of files, deleted files and total bytes for all
2014-09-28 12:00:38 +01:00
// files in the local folder.
2015-01-09 08:18:42 +01:00
func ( m * Model ) LocalSize ( folder string ) ( nfiles , deleted int , bytes int64 ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2015-10-21 09:10:26 +02:00
nfiles , deleted , bytes = rf . LocalSize ( )
2014-03-29 18:53:48 +01:00
}
2014-07-06 23:15:28 +02:00
return
2014-01-06 06:38:01 +01:00
}
2014-05-19 22:31:28 +02:00
// NeedSize returns the number and total size of currently needed files.
2015-01-09 08:18:42 +01:00
func ( m * Model ) NeedSize ( folder string ) ( nfiles int , bytes int64 ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-09-28 12:00:38 +01:00
if rf , ok := m . folderFiles [ folder ] ; ok {
2015-01-12 14:50:30 +01:00
rf . WithNeedTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
2014-07-15 17:54:00 +02:00
fs , de , by := sizeOfFile ( f )
2015-01-09 08:18:42 +01:00
nfiles += fs + de
2014-07-15 17:54:00 +02:00
bytes += by
return true
} )
}
2014-11-16 23:18:59 +00:00
bytes -= m . progressEmitter . BytesCompleted ( folder )
2015-10-03 17:25:21 +02:00
l . Debugf ( "%v NeedSize(%q): %d %d" , m , folder , nfiles , bytes )
2014-07-15 17:54:00 +02:00
return
2013-12-23 12:12:44 -05:00
}
2015-04-28 22:32:10 +02:00
// NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
2015-04-25 22:53:44 +01:00
func ( m * Model ) NeedFolderFiles ( folder string , page , perpage int ) ( [ ] db . FileInfoTruncated , [ ] db . FileInfoTruncated , [ ] db . FileInfoTruncated , int ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2015-01-17 21:51:46 +01:00
2015-04-25 22:53:44 +01:00
total := 0
2014-12-01 19:23:06 +00:00
2015-04-25 22:53:44 +01:00
rf , ok := m . folderFiles [ folder ]
if ! ok {
return nil , nil , nil , 0
}
2014-12-30 09:31:34 +01:00
2015-04-25 22:53:44 +01:00
var progress , queued , rest [ ] db . FileInfoTruncated
var seen map [ string ] struct { }
2014-12-30 09:31:34 +01:00
2015-04-25 22:53:44 +01:00
skip := ( page - 1 ) * perpage
get := perpage
2014-12-30 09:31:34 +01:00
2015-04-25 22:53:44 +01:00
runner , ok := m . folderRunners [ folder ]
if ok {
allProgressNames , allQueuedNames := runner . Jobs ( )
var progressNames , queuedNames [ ] string
progressNames , skip , get = getChunk ( allProgressNames , skip , get )
queuedNames , skip , get = getChunk ( allQueuedNames , skip , get )
progress = make ( [ ] db . FileInfoTruncated , len ( progressNames ) )
queued = make ( [ ] db . FileInfoTruncated , len ( queuedNames ) )
seen = make ( map [ string ] struct { } , len ( progressNames ) + len ( queuedNames ) )
for i , name := range progressNames {
if f , ok := rf . GetGlobalTruncated ( name ) ; ok {
progress [ i ] = f
seen [ name ] = struct { } { }
2014-12-01 19:23:06 +00:00
}
}
2015-04-25 22:53:44 +01:00
for i , name := range queuedNames {
if f , ok := rf . GetGlobalTruncated ( name ) ; ok {
queued [ i ] = f
seen [ name ] = struct { } { }
}
2014-12-01 19:23:06 +00:00
}
2014-04-09 22:03:30 +02:00
}
2015-04-25 22:53:44 +01:00
rest = make ( [ ] db . FileInfoTruncated , 0 , perpage )
rf . WithNeedTruncated ( protocol . LocalDeviceID , func ( f db . FileIntf ) bool {
total ++
if skip > 0 {
skip --
return true
}
if get > 0 {
ft := f . ( db . FileInfoTruncated )
if _ , ok := seen [ ft . Name ] ; ! ok {
rest = append ( rest , ft )
get --
}
}
return true
} )
return progress , queued , rest , total
2014-04-01 23:18:32 +02:00
}
2014-09-28 12:00:38 +01:00
// Index is called when a new device is connected and we receive their full index.
2014-01-06 11:11:18 +01:00
// Implements the protocol.Model interface.
2015-01-14 22:11:31 +00:00
func ( m * Model ) Index ( deviceID protocol . DeviceID , folder string , fs [ ] protocol . FileInfo , flags uint32 , options [ ] protocol . Option ) {
2015-01-14 22:28:19 +00:00
if flags != 0 {
l . Warnln ( "protocol error: unknown flags 0x%x in Index message" , flags )
return
}
2015-10-03 17:25:21 +02:00
l . Debugf ( "IDX(in): %s %q: %d files" , deviceID , folder , len ( fs ) )
2014-03-29 18:53:48 +01:00
2014-09-28 12:00:38 +01:00
if ! m . folderSharedWith ( folder , deviceID ) {
2016-01-01 20:11:12 +01:00
l . Debugf ( "Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration." , folder , deviceID )
2014-06-06 21:48:29 +02:00
return
}
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-07-21 13:14:33 +02:00
cfg := m . folderCfgs [ folder ]
2014-09-28 12:00:38 +01:00
files , ok := m . folderFiles [ folder ]
2015-05-07 22:45:07 +02:00
runner := m . folderRunners [ folder ]
2016-04-18 18:35:31 +00:00
ignores := m . folderIgnores [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-09-04 22:29:53 +02:00
2015-05-07 22:45:07 +02:00
if runner != nil {
// Runner may legitimately not be set if this is the "cleanup" Index
// message at startup.
defer runner . IndexUpdated ( )
}
2014-09-04 22:29:53 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
l . Fatalf ( "Index for nonexistant folder %q" , folder )
2013-12-15 11:43:31 +01:00
}
2014-07-13 21:07:24 +02:00
2016-05-01 06:49:29 +00:00
m . pmut . RLock ( )
m . deviceDownloads [ deviceID ] . Update ( folder , makeForgetUpdate ( fs ) )
m . pmut . RUnlock ( )
2016-04-18 18:35:31 +00:00
fs = filterIndex ( folder , fs , cfg . IgnoreDelete , ignores )
2014-09-28 12:00:38 +01:00
files . Replace ( deviceID , fs )
2014-09-04 22:29:53 +02:00
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . RemoteIndexUpdated , map [ string ] interface { } {
2014-09-28 12:05:25 +01:00
"device" : deviceID . String ( ) ,
"folder" : folder ,
2014-07-17 13:38:36 +02:00
"items" : len ( fs ) ,
2014-09-28 12:00:38 +01:00
"version" : files . LocalVersion ( deviceID ) ,
2014-07-13 21:07:24 +02:00
} )
2013-12-28 08:10:36 -05:00
}
2014-09-28 12:00:38 +01:00
// IndexUpdate is called for incremental updates to connected devices' indexes.
2014-01-06 11:11:18 +01:00
// Implements the protocol.Model interface.
2015-01-14 22:11:31 +00:00
func ( m * Model ) IndexUpdate ( deviceID protocol . DeviceID , folder string , fs [ ] protocol . FileInfo , flags uint32 , options [ ] protocol . Option ) {
2015-01-14 22:28:19 +00:00
if flags != 0 {
l . Warnln ( "protocol error: unknown flags 0x%x in IndexUpdate message" , flags )
return
}
2015-10-03 17:25:21 +02:00
l . Debugf ( "%v IDXUP(in): %s / %q: %d files" , m , deviceID , folder , len ( fs ) )
2014-03-29 18:53:48 +01:00
2014-09-28 12:00:38 +01:00
if ! m . folderSharedWith ( folder , deviceID ) {
2016-01-01 20:11:12 +01:00
l . Debugf ( "Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration." , folder , deviceID )
2014-06-06 21:48:29 +02:00
return
}
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-05-07 22:45:07 +02:00
files := m . folderFiles [ folder ]
2015-07-21 13:14:33 +02:00
cfg := m . folderCfgs [ folder ]
2015-05-07 22:45:07 +02:00
runner , ok := m . folderRunners [ folder ]
2016-04-18 18:35:31 +00:00
ignores := m . folderIgnores [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-09-04 22:29:53 +02:00
if ! ok {
2014-09-28 12:00:38 +01:00
l . Fatalf ( "IndexUpdate for nonexistant folder %q" , folder )
2013-12-28 08:10:36 -05:00
}
2014-07-13 21:07:24 +02:00
2016-05-01 06:49:29 +00:00
m . pmut . RLock ( )
m . deviceDownloads [ deviceID ] . Update ( folder , makeForgetUpdate ( fs ) )
m . pmut . RUnlock ( )
2016-04-18 18:35:31 +00:00
fs = filterIndex ( folder , fs , cfg . IgnoreDelete , ignores )
2014-09-28 12:00:38 +01:00
files . Update ( deviceID , fs )
2014-09-04 22:29:53 +02:00
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . RemoteIndexUpdated , map [ string ] interface { } {
2014-09-28 12:05:25 +01:00
"device" : deviceID . String ( ) ,
"folder" : folder ,
2014-07-17 13:38:36 +02:00
"items" : len ( fs ) ,
2014-09-28 12:00:38 +01:00
"version" : files . LocalVersion ( deviceID ) ,
2014-07-13 21:07:24 +02:00
} )
2015-05-07 22:45:07 +02:00
runner . IndexUpdated ( )
2014-01-09 10:59:09 +01:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) folderSharedWith ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2016-01-01 20:11:12 +01:00
return m . folderSharedWithUnlocked ( folder , deviceID )
}
func ( m * Model ) folderSharedWithUnlocked ( folder string , deviceID protocol . DeviceID ) bool {
2014-09-28 12:00:38 +01:00
for _ , nfolder := range m . deviceFolders [ deviceID ] {
if nfolder == folder {
2014-06-06 21:48:29 +02:00
return true
}
}
return false
}
2014-09-28 12:00:38 +01:00
// ClusterConfig is called when a ClusterConfigMessage is received from a
// connected device. It validates the announced folders, subscribes the
// connection to temporary index updates for mutually shared folders, and, if
// the device is a configured introducer, adopts the devices and folder
// shares it vouches for, saving the config if anything changed.
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfigMessage) {
	// Check the peer device's announced folders against our own. Emits events
	// for folders that we don't expect (unknown or not shared).
	// Also, collect a list of folders we do share, and if he's interested in
	// temporary indexes, subscribe the connection.
	tempIndexFolders := make([]string, 0, len(cm.Folders))

	m.fmut.Lock()
nextFolder:
	for _, folder := range cm.Folders {
		cfg := m.folderCfgs[folder.ID]
		if folder.Flags&^protocol.FlagFolderAll != 0 {
			// There are flags set that we don't know what they mean. Scary!
			// Mark the folder config invalid and surface the error on its
			// runner.
			l.Warnf("Device %v: unknown flags for folder %s", deviceID, folder.ID)
			cfg.Invalid = fmt.Sprintf("Unknown flags from device %v", deviceID)
			m.cfg.SetFolder(cfg)
			if srv := m.folderRunners[folder.ID]; srv != nil {
				srv.setError(fmt.Errorf(cfg.Invalid))
			}
			continue nextFolder
		}
		if !m.folderSharedWithUnlocked(folder.ID, deviceID) {
			events.Default.Log(events.FolderRejected, map[string]string{
				"folder":      folder.ID,
				"folderLabel": folder.Label,
				"device":      deviceID.String(),
			})
			l.Infof("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder.ID, deviceID)
			continue
		}
		// A mutually shared folder; subscribe to temporary indexes unless
		// the peer disabled them for this folder.
		if folder.Flags&protocol.FlagFolderDisabledTempIndexes == 0 {
			tempIndexFolders = append(tempIndexFolders, folder.ID)
		}
	}
	m.fmut.Unlock()

	// This breaks if we send multiple CM messages during the same connection.
	if len(tempIndexFolders) > 0 {
		m.pmut.RLock()
		conn, ok := m.conn[deviceID]
		m.pmut.RUnlock()
		// In case we've got ClusterConfig, and the connection disappeared
		// from infront of our nose.
		if ok {
			m.progressEmitter.temporaryIndexSubscribe(conn, tempIndexFolders)
		}
	}

	var changed bool
	if m.cfg.Devices()[deviceID].Introducer {
		// This device is an introducer. Go through the announced lists of folders
		// and devices and add what we are missing.

		for _, folder := range cm.Folders {
			// Only consider folders we ourselves have configured.
			if _, ok := m.folderDevices[folder.ID]; !ok {
				continue
			}

		nextDevice:
			for _, device := range folder.Devices {
				var id protocol.DeviceID
				copy(id[:], device.ID)

				if _, ok := m.cfg.Devices()[id]; !ok {
					// The device is currently unknown. Add it to the config.

					// Keep "dynamic" first and append any static addresses
					// the introducer announced for the device.
					addresses := []string{"dynamic"}
					for _, addr := range device.Addresses {
						if addr != "dynamic" {
							addresses = append(addresses, addr)
						}
					}

					l.Infof("Adding device %v to config (vouched for by introducer %v)", id, deviceID)
					newDeviceCfg := config.DeviceConfiguration{
						DeviceID:    id,
						Name:        device.Name,
						Compression: m.cfg.Devices()[deviceID].Compression,
						Addresses:   addresses,
						CertName:    device.CertName,
					}

					// The introducers' introducers are also our introducers.
					if device.Flags&protocol.FlagIntroducer != 0 {
						l.Infof("Device %v is now also an introducer", id)
						newDeviceCfg.Introducer = true
					}

					m.cfg.SetDevice(newDeviceCfg)
					changed = true
				}

				for _, er := range m.deviceFolders[id] {
					if er == folder.ID {
						// We already share the folder with this device, so
						// nothing to do.
						continue nextDevice
					}
				}

				// We don't yet share this folder with this device. Add the device
				// to sharing list of the folder.

				l.Infof("Adding device %v to share %q (vouched for by introducer %v)", id, folder.ID, deviceID)

				m.deviceFolders[id] = append(m.deviceFolders[id], folder.ID)
				m.folderDevices[folder.ID] = append(m.folderDevices[folder.ID], id)

				folderCfg := m.cfg.Folders()[folder.ID]
				folderCfg.Devices = append(folderCfg.Devices, config.FolderDeviceConfiguration{
					DeviceID: id,
				})
				m.cfg.SetFolder(folderCfg)
				changed = true
			}
		}
	}

	// Persist any devices or shares adopted above.
	if changed {
		m.cfg.Save()
	}
}
2014-01-20 22:22:27 +01:00
// Close removes the peer from the model and closes the underlying connection if possible.
2014-01-06 11:11:18 +01:00
// Implements the protocol.Model interface.
2014-09-28 12:00:38 +01:00
func ( m * Model ) Close ( device protocol . DeviceID , err error ) {
l . Infof ( "Connection to %s closed: %v" , device , err )
events . Default . Log ( events . DeviceDisconnected , map [ string ] string {
"id" : device . String ( ) ,
2014-07-13 21:07:24 +02:00
"error" : err . Error ( ) ,
} )
2014-02-09 23:13:06 +01:00
2014-07-15 13:04:37 +02:00
m . pmut . Lock ( )
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
for _ , folder := range m . deviceFolders [ device ] {
m . folderFiles [ folder ] . Replace ( device , nil )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-01-20 22:22:27 +01:00
2015-06-28 16:05:29 +01:00
conn , ok := m . conn [ device ]
2014-01-01 08:09:17 -05:00
if ok {
2016-04-15 10:59:41 +00:00
m . progressEmitter . temporaryIndexUnsubscribe ( conn )
2015-07-22 09:02:55 +02:00
closeRawConn ( conn )
2013-12-30 21:21:57 -05:00
}
2015-06-28 16:05:29 +01:00
delete ( m . conn , device )
2016-03-25 20:29:07 +00:00
delete ( m . helloMessages , device )
2016-04-15 10:59:41 +00:00
delete ( m . deviceClusterConf , device )
delete ( m . deviceDownloads , device )
2014-01-17 20:06:44 -07:00
m . pmut . Unlock ( )
2013-12-15 11:43:31 +01:00
}
2014-01-06 11:11:18 +01:00
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
2015-07-29 21:38:22 +01:00
func ( m * Model ) Request ( deviceID protocol . DeviceID , folder , name string , offset int64 , hash [ ] byte , flags uint32 , options [ ] protocol . Option , buf [ ] byte ) error {
if offset < 0 {
2015-10-19 14:13:47 +02:00
return protocol . ErrInvalid
2015-01-18 02:12:06 +01:00
}
2015-01-16 12:25:54 +01:00
if ! m . folderSharedWith ( folder , deviceID ) {
l . Warnf ( "Request from %s for file %s in unshared folder %q" , deviceID , name , folder )
2016-04-15 10:59:41 +00:00
return protocol . ErrNoSuchFile
2015-01-16 12:25:54 +01:00
}
2016-04-15 10:59:41 +00:00
if flags != 0 && flags != protocol . FlagFromTemporary {
// We currently support only no flags, or FromTemporary flag.
return fmt . Errorf ( "protocol error: unknown flags 0x%x in Request message" , flags )
2015-01-14 22:28:19 +00:00
}
2015-10-03 17:25:21 +02:00
if deviceID != protocol . LocalDeviceID {
2016-04-15 10:59:41 +00:00
l . Debugf ( "%v REQ(in): %s: %q / %q o=%d s=%d f=%d" , m , deviceID , folder , name , offset , len ( buf ) , flags )
2013-12-15 11:43:31 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2016-04-15 10:59:41 +00:00
folderCfg := m . folderCfgs [ folder ]
folderPath := folderCfg . Path ( )
2015-10-13 22:59:31 +09:00
folderIgnores := m . folderIgnores [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-11-09 04:26:52 +00:00
2015-10-13 22:59:31 +09:00
// filepath.Join() returns a filepath.Clean()ed path, which (quoting the
// docs for clarity here):
//
// Clean returns the shortest path name equivalent to path by purely lexical
// processing. It applies the following rules iteratively until no further
// processing can be done:
//
// 1. Replace multiple Separator elements with a single one.
// 2. Eliminate each . path name element (the current directory).
// 3. Eliminate each inner .. path name element (the parent directory)
// along with the non-.. element that precedes it.
// 4. Eliminate .. elements that begin a rooted path:
// that is, replace "/.." by "/" at the beginning of a path,
// assuming Separator is '/'.
fn := filepath . Join ( folderPath , name )
if ! strings . HasPrefix ( fn , folderPath ) {
// Request tries to escape!
l . Debugf ( "%v Invalid REQ(in) tries to escape: %s: %q / %q o=%d s=%d" , m , deviceID , folder , name , offset , len ( buf ) )
2015-10-19 14:13:47 +02:00
return protocol . ErrInvalid
2015-10-13 22:59:31 +09:00
}
if folderIgnores != nil {
// "rn" becomes the relative name of the file within the folder. This is
// different than the original "name" parameter in that it's been
// cleaned from any possible funny business.
if rn , err := filepath . Rel ( folderPath , fn ) ; err != nil {
return err
2016-04-07 09:34:07 +00:00
} else if folderIgnores . Match ( rn ) . IsIgnored ( ) {
2015-10-13 22:59:31 +09:00
l . Debugf ( "%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d" , m , deviceID , folder , name , offset , len ( buf ) )
2015-10-19 14:13:47 +02:00
return protocol . ErrNoSuchFile
2015-10-13 22:59:31 +09:00
}
}
2015-05-25 11:05:12 +02:00
if info , err := os . Lstat ( fn ) ; err == nil && info . Mode ( ) & os . ModeSymlink != 0 {
2014-11-09 04:26:52 +00:00
target , _ , err := symlinks . Read ( fn )
if err != nil {
2015-10-19 14:13:47 +02:00
l . Debugln ( "symlinks.Read:" , err )
if os . IsNotExist ( err ) {
return protocol . ErrNoSuchFile
}
return protocol . ErrGeneric
2014-11-09 04:26:52 +00:00
}
2016-04-15 10:59:41 +00:00
if _ , err := strings . NewReader ( target ) . ReadAt ( buf , offset ) ; err != nil {
l . Debugln ( "symlink.Reader.ReadAt" , err )
2015-10-19 14:13:47 +02:00
return protocol . ErrGeneric
2014-11-09 04:26:52 +00:00
}
2016-04-15 10:59:41 +00:00
return nil
}
2014-12-08 11:54:22 +00:00
2016-04-15 10:59:41 +00:00
// Only check temp files if the flag is set, and if we are set to advertise
// the temp indexes.
if flags & protocol . FlagFromTemporary != 0 && ! folderCfg . DisableTempIndexes {
tempFn := filepath . Join ( folderPath , defTempNamer . TempName ( name ) )
if err := readOffsetIntoBuf ( tempFn , offset , buf ) ; err == nil {
return nil
}
// Fall through to reading from a non-temp file, just incase the temp
// file has finished downloading.
2013-12-15 11:43:31 +01:00
}
2016-04-15 10:59:41 +00:00
err := readOffsetIntoBuf ( fn , offset , buf )
if os . IsNotExist ( err ) {
return protocol . ErrNoSuchFile
} else if err != nil {
2015-10-19 14:13:47 +02:00
return protocol . ErrGeneric
2013-12-15 11:43:31 +01:00
}
2015-07-29 21:38:22 +01:00
return nil
2013-12-15 11:43:31 +01:00
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentFolderFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
f , ok := fs . Get ( protocol . LocalDeviceID , file )
2015-01-06 22:12:45 +01:00
return f , ok
2014-04-01 23:18:32 +02:00
}
2015-01-06 22:12:45 +01:00
func ( m * Model ) CurrentGlobalFile ( folder string , file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return protocol . FileInfo { } , false
}
f , ok := fs . GetGlobal ( file )
2015-01-06 22:12:45 +01:00
return f , ok
2014-04-01 23:18:32 +02:00
}
2014-03-29 18:53:48 +01:00
// cFiler adapts a Model plus a folder ID (r) to the scanner.CurrentFiler
// interface. Note: constructed positionally elsewhere in this file
// (cFiler{m, folder}), so field order matters.
type cFiler struct {
	m *Model
	r string
}
2014-03-16 08:14:55 +01:00
// Implements scanner.CurrentFiler
2015-01-06 22:12:45 +01:00
func ( cf cFiler ) CurrentFile ( file string ) ( protocol . FileInfo , bool ) {
2014-09-28 12:00:38 +01:00
return cf . m . CurrentFolderFile ( cf . r , file )
2014-03-16 08:14:55 +01:00
}
2014-09-28 12:00:38 +01:00
// ConnectedTo returns true if we are connected to the named device.
func ( m * Model ) ConnectedTo ( deviceID protocol . DeviceID ) bool {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
_ , ok := m . conn [ deviceID ]
2014-09-20 19:14:45 +02:00
m . pmut . RUnlock ( )
2014-09-10 11:29:01 +02:00
if ok {
2014-09-28 12:00:38 +01:00
m . deviceWasSeen ( deviceID )
2014-09-10 11:29:01 +02:00
}
2014-01-06 11:11:18 +01:00
return ok
}
2014-11-08 22:12:18 +01:00
func ( m * Model ) GetIgnores ( folder string ) ( [ ] string , [ ] string , error ) {
2014-09-14 23:03:53 +01:00
var lines [ ] string
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
cfg , ok := m . folderCfgs [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-14 23:03:53 +01:00
if ! ok {
2014-11-08 22:12:18 +01:00
return lines , nil , fmt . Errorf ( "Folder %s does not exist" , folder )
2014-09-14 23:03:53 +01:00
}
2015-12-30 21:30:47 +00:00
if ! cfg . HasMarker ( ) {
return lines , nil , fmt . Errorf ( "Folder %s stopped" , folder )
}
2015-04-05 22:52:22 +02:00
fd , err := os . Open ( filepath . Join ( cfg . Path ( ) , ".stignore" ) )
2014-09-14 23:03:53 +01:00
if err != nil {
if os . IsNotExist ( err ) {
2014-11-08 22:12:18 +01:00
return lines , nil , nil
2014-09-14 23:03:53 +01:00
}
l . Warnln ( "Loading .stignore:" , err )
2014-11-08 22:12:18 +01:00
return lines , nil , err
2014-09-14 23:03:53 +01:00
}
defer fd . Close ( )
scanner := bufio . NewScanner ( fd )
for scanner . Scan ( ) {
lines = append ( lines , strings . TrimSpace ( scanner . Text ( ) ) )
}
2014-11-29 22:29:49 +01:00
m . fmut . RLock ( )
2015-04-27 20:49:10 +01:00
patterns := m . folderIgnores [ folder ] . Patterns ( )
2014-11-29 22:29:49 +01:00
m . fmut . RUnlock ( )
2014-11-08 22:12:18 +01:00
return lines , patterns , nil
2014-09-14 23:03:53 +01:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) SetIgnores ( folder string , content [ ] string ) error {
cfg , ok := m . folderCfgs [ folder ]
2014-09-14 23:03:53 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return fmt . Errorf ( "Folder %s does not exist" , folder )
2014-09-14 23:03:53 +01:00
}
2015-08-30 12:59:01 +01:00
path := filepath . Join ( cfg . Path ( ) , ".stignore" )
fd , err := osutil . CreateAtomic ( path , 0644 )
2014-09-14 23:03:53 +01:00
if err != nil {
l . Warnln ( "Saving .stignore:" , err )
return err
}
for _ , line := range content {
2015-07-12 01:03:40 +10:00
fmt . Fprintln ( fd , line )
2014-09-14 23:03:53 +01:00
}
2015-07-12 01:03:40 +10:00
if err := fd . Close ( ) ; err != nil {
2014-09-14 23:03:53 +01:00
l . Warnln ( "Saving .stignore:" , err )
return err
}
2015-08-30 12:59:01 +01:00
osutil . HideFile ( path )
2014-09-14 23:03:53 +01:00
2014-09-28 12:00:38 +01:00
return m . ScanFolder ( folder )
2014-09-14 23:03:53 +01:00
}
2016-03-25 20:29:07 +00:00
// OnHello is called when a device connects to us.
// This allows us to extract some information from the Hello message
// and add it to a list of known devices ahead of any checks.
func (m *Model) OnHello(remoteID protocol.DeviceID, addr net.Addr, hello protocol.HelloMessage) {
	// A direct map lookup replaces the original linear scan over all
	// configured devices.
	if _, ok := m.cfg.Devices()[remoteID]; ok {
		// Existing device, we will get the hello message in AddConnection
		// hence do not persist any state here, as the connection might
		// get killed before AddConnection
		return
	}

	if !m.cfg.IgnoredDevice(remoteID) {
		events.Default.Log(events.DeviceRejected, map[string]string{
			"name":    hello.DeviceName,
			"device":  remoteID.String(),
			"address": addr.String(),
		})
	}
}
// GetHello is called when we are about to connect to some remote device.
// It returns the Hello message we will present, filled in from our own
// device name and client identification.
func (m *Model) GetHello(protocol.DeviceID) protocol.HelloMessage {
	msg := protocol.HelloMessage{
		DeviceName:    m.deviceName,
		ClientName:    m.clientName,
		ClientVersion: m.clientVersion,
	}
	return msg
}
2014-01-06 11:11:18 +01:00
// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloMessage) {
	deviceID := conn.ID()

	m.pmut.Lock()
	// Registering the same device twice is a programming error in the
	// caller; the invariant is enforced with a panic.
	if _, ok := m.conn[deviceID]; ok {
		panic("add existing device")
	}
	m.conn[deviceID] = conn
	m.deviceDownloads[deviceID] = newDeviceDownloadState()

	m.helloMessages[deviceID] = hello
	event := map[string]string{
		"id":            deviceID.String(),
		"deviceName":    hello.DeviceName,
		"clientName":    hello.ClientName,
		"clientVersion": hello.ClientVersion,
		"type":          conn.Type,
	}
	// RemoteAddr may be nil depending on the connection type; only include
	// the address in the event when we have one.
	addr := conn.RemoteAddr()
	if addr != nil {
		event["addr"] = addr.String()
	}
	events.Default.Log(events.DeviceConnected, event)
	l.Infof(`Device %s client is "%s %s" named "%s"`, deviceID, hello.ClientName, hello.ClientVersion, hello.DeviceName)

	conn.Start()

	// Send our cluster configuration first, before any indexes.
	cm := m.generateClusterConfig(deviceID)
	conn.ClusterConfig(cm)

	// Kick off one index sender goroutine per folder shared with the device.
	m.fmut.RLock()
	for _, folder := range m.deviceFolders[deviceID] {
		fs := m.folderFiles[folder]
		go sendIndexes(conn, folder, fs, m.folderIgnores[folder])
	}
	m.fmut.RUnlock()
	m.pmut.Unlock()

	// Adopt the device name from the Hello message if we don't have one, or
	// if configured to always take the remote's name.
	device, ok := m.cfg.Devices()[deviceID]
	if ok && (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) {
		device.Name = hello.DeviceName
		m.cfg.SetDevice(device)
		m.cfg.Save()
	}

	m.deviceWasSeen(deviceID)
}
2015-08-23 21:56:10 +02:00
// PauseDevice marks the device as paused, closes any open connection to it,
// and emits a DevicePaused event.
func (m *Model) PauseDevice(device protocol.DeviceID) {
	m.pmut.Lock()
	m.devicePaused[device] = true
	_, connected := m.conn[device]
	m.pmut.Unlock()

	// Close must be called without pmut held; it takes the lock itself.
	if connected {
		m.Close(device, errors.New("device paused"))
	}
	events.Default.Log(events.DevicePaused, map[string]string{"device": device.String()})
}
2016-04-15 10:59:41 +00:00
func ( m * Model ) DownloadProgress ( device protocol . DeviceID , folder string , updates [ ] protocol . FileDownloadProgressUpdate , flags uint32 , options [ ] protocol . Option ) {
if ! m . folderSharedWith ( folder , device ) {
return
}
m . fmut . RLock ( )
cfg , ok := m . folderCfgs [ folder ]
m . fmut . RUnlock ( )
2016-05-04 10:47:33 +00:00
if ! ok || cfg . Type == config . FolderTypeReadOnly || cfg . DisableTempIndexes {
2016-04-15 10:59:41 +00:00
return
}
m . pmut . RLock ( )
m . deviceDownloads [ device ] . Update ( folder , updates )
2016-05-26 06:53:27 +00:00
state := m . deviceDownloads [ device ] . GetBlockCounts ( folder )
2016-04-15 10:59:41 +00:00
m . pmut . RUnlock ( )
2016-05-22 07:52:08 +00:00
events . Default . Log ( events . RemoteDownloadProgress , map [ string ] interface { } {
"device" : device . String ( ) ,
"folder" : folder ,
2016-05-26 06:53:27 +00:00
"state" : state ,
2016-05-22 07:52:08 +00:00
} )
2016-04-15 10:59:41 +00:00
}
2015-08-23 21:56:10 +02:00
// ResumeDevice clears the paused flag for the device and emits a
// DeviceResumed event. Reconnection happens elsewhere.
func (m *Model) ResumeDevice(device protocol.DeviceID) {
	m.pmut.Lock()
	m.devicePaused[device] = false
	m.pmut.Unlock()

	events.Default.Log(events.DeviceResumed, map[string]string{"device": device.String()})
}
// IsPaused returns whether the device is currently marked as paused.
func (m *Model) IsPaused(device protocol.DeviceID) bool {
	// Read-only access; a read lock suffices (the original took the full
	// write lock, needlessly serializing concurrent readers).
	m.pmut.RLock()
	paused := m.devicePaused[device]
	m.pmut.RUnlock()
	return paused
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) deviceStatRef ( deviceID protocol . DeviceID ) * stats . DeviceStatisticsReference {
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-09-20 19:14:45 +02:00
2014-09-28 12:00:38 +01:00
if sr , ok := m . deviceStatRefs [ deviceID ] ; ok {
2014-09-20 19:14:45 +02:00
return sr
}
2014-12-08 16:36:15 +01:00
2015-09-04 13:22:59 +02:00
sr := stats . NewDeviceStatisticsReference ( m . db , deviceID . String ( ) )
2014-12-08 16:36:15 +01:00
m . deviceStatRefs [ deviceID ] = sr
return sr
2014-09-20 19:14:45 +02:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) deviceWasSeen ( deviceID protocol . DeviceID ) {
m . deviceStatRef ( deviceID ) . WasSeen ( )
2014-07-15 13:04:37 +02:00
}
2014-12-07 20:21:12 +00:00
func ( m * Model ) folderStatRef ( folder string ) * stats . FolderStatisticsReference {
m . fmut . Lock ( )
defer m . fmut . Unlock ( )
2014-12-16 23:33:28 +01:00
sr , ok := m . folderStatRefs [ folder ]
if ! ok {
2014-12-07 20:21:12 +00:00
sr = stats . NewFolderStatisticsReference ( m . db , folder )
m . folderStatRefs [ folder ] = sr
}
2014-12-16 23:33:28 +01:00
return sr
2014-12-07 20:21:12 +00:00
}
2015-06-16 12:12:34 +01:00
func ( m * Model ) receivedFile ( folder string , file protocol . FileInfo ) {
2015-09-04 13:22:59 +02:00
m . folderStatRef ( folder ) . ReceivedFile ( file . Name , file . IsDeleted ( ) )
2014-12-07 20:21:12 +00:00
}
2015-01-12 14:52:24 +01:00
func sendIndexes ( conn protocol . Connection , folder string , fs * db . FileSet , ignores * ignore . Matcher ) {
2014-09-28 12:00:38 +01:00
deviceID := conn . ID ( )
2014-07-15 13:04:37 +02:00
name := conn . Name ( )
2014-07-30 20:08:04 +02:00
var err error
2014-07-15 13:04:37 +02:00
2015-10-03 17:25:21 +02:00
l . Debugf ( "sendIndexes for %s-%s/%q starting" , deviceID , name , folder )
defer l . Debugf ( "sendIndexes for %s-%s/%q exiting: %v" , deviceID , name , folder , err )
2014-05-04 17:18:58 +02:00
2014-09-28 12:00:38 +01:00
minLocalVer , err := sendIndexTo ( true , 0 , conn , folder , fs , ignores )
2014-07-30 20:08:04 +02:00
2016-01-11 16:49:44 +01:00
// Subscribe to LocalIndexUpdated (we have new information to send) and
// DeviceDisconnected (it might be us who disconnected, so we should
// exit).
sub := events . Default . Subscribe ( events . LocalIndexUpdated | events . DeviceDisconnected )
2015-07-28 21:22:44 +04:00
defer events . Default . Unsubscribe ( sub )
2014-07-15 13:04:37 +02:00
for err == nil {
2016-01-11 16:49:44 +01:00
if conn . Closed ( ) {
// Our work is done.
return
}
2015-07-28 21:22:44 +04:00
// While we have sent a localVersion at least equal to the one
// currently in the database, wait for the local index to update. The
// local index may update for other folders than the one we are
// sending for.
2014-09-28 12:00:38 +01:00
if fs . LocalVersion ( protocol . LocalDeviceID ) <= minLocalVer {
2015-07-28 21:22:44 +04:00
sub . Poll ( time . Minute )
2014-07-30 20:08:04 +02:00
continue
2014-07-15 13:04:37 +02:00
}
2014-09-28 12:00:38 +01:00
minLocalVer , err = sendIndexTo ( false , minLocalVer , conn , folder , fs , ignores )
2015-07-28 21:22:44 +04:00
// Wait a short amount of time before entering the next loop. If there
2015-11-11 21:20:34 -05:00
// are continuous changes happening to the local index, this gives us
2015-07-28 21:22:44 +04:00
// time to batch them up a little.
time . Sleep ( 250 * time . Millisecond )
2014-07-30 20:08:04 +02:00
}
}
2014-07-15 13:04:37 +02:00
2015-01-18 02:12:06 +01:00
// sendIndexTo sends file entries with LocalVersion > minLocalVer to the
// connection, batched by count (indexBatchSize) and estimated wire size
// (indexTargetSize). When initial is true the first message is a full Index,
// subsequent ones IndexUpdates. It returns the highest LocalVersion seen,
// which the caller passes back in as minLocalVer on the next call.
func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (int64, error) {
	deviceID := conn.ID()
	name := conn.Name()
	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	currentBatchSize := 0
	maxLocalVer := int64(0)
	var err error

	fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		f := fi.(protocol.FileInfo)
		// Already sent in a previous round; skip.
		if f.LocalVersion <= minLocalVer {
			return true
		}

		// Track the highest version we've seen, even for files we end up
		// not sending, so the caller doesn't revisit them.
		if f.LocalVersion > maxLocalVer {
			maxLocalVer = f.LocalVersion
		}

		if ignores.Match(f.Name).IsIgnored() || symlinkInvalid(folder, f) {
			l.Debugln("not sending update for ignored/unsupported symlink", f)
			return true
		}

		// Flush the batch when either limit is hit. Returning false from
		// this closure aborts the WithHave iteration; err carries the cause
		// out to the caller.
		if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
			if initial {
				if err = conn.Index(folder, batch, 0, nil); err != nil {
					return false
				}
				l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
				initial = false
			} else {
				if err = conn.IndexUpdate(folder, batch, 0, nil); err != nil {
					return false
				}
				l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
			}

			batch = make([]protocol.FileInfo, 0, indexBatchSize)
			currentBatchSize = 0
		}

		batch = append(batch, f)
		// Rough per-entry wire size estimate: fixed overhead plus per-block
		// overhead.
		currentBatchSize += indexPerFileSize + len(f.Blocks)*indexPerBlockSize
		return true
	})

	// Flush whatever remains. An initial index must be sent even when empty,
	// to signal that we have nothing.
	if initial && err == nil {
		err = conn.Index(folder, batch, 0, nil)
		if err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
		}
	} else if len(batch) > 0 && err == nil {
		err = conn.IndexUpdate(folder, batch, 0, nil)
		if err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
		}
	}

	return maxLocalVer, err
}
2016-05-19 00:19:26 +00:00
func ( m * Model ) updateLocalsFromScanning ( folder string , fs [ ] protocol . FileInfo ) {
2016-05-19 07:01:43 +00:00
m . updateLocals ( folder , fs )
// Fire the LocalChangeDetected event to notify listeners about local
// updates.
m . fmut . RLock ( )
path := m . folderCfgs [ folder ] . Path ( )
m . fmut . RUnlock ( )
m . localChangeDetected ( folder , path , fs )
2016-05-19 00:19:26 +00:00
}
func ( m * Model ) updateLocalsFromPulling ( folder string , fs [ ] protocol . FileInfo ) {
2016-05-19 07:01:43 +00:00
m . updateLocals ( folder , fs )
2016-05-19 00:19:26 +00:00
}
2016-05-19 07:01:43 +00:00
func ( m * Model ) updateLocals ( folder string , fs [ ] protocol . FileInfo ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-06-16 08:30:15 +02:00
files := m . folderFiles [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2016-01-16 21:42:32 +01:00
if files == nil {
// The folder doesn't exist.
return
}
2015-06-16 08:30:15 +02:00
files . Update ( protocol . LocalDeviceID , fs )
2015-12-04 08:41:13 +01:00
filenames := make ( [ ] string , len ( fs ) )
for i , file := range fs {
filenames [ i ] = file . Name
}
2014-07-17 13:38:36 +02:00
events . Default . Log ( events . LocalIndexUpdated , map [ string ] interface { } {
2015-12-04 08:41:13 +01:00
"folder" : folder ,
"items" : len ( fs ) ,
"filenames" : filenames ,
"version" : files . LocalVersion ( protocol . LocalDeviceID ) ,
2014-07-17 13:38:36 +02:00
} )
2016-05-19 00:19:26 +00:00
}
2016-05-19 07:01:43 +00:00
func ( m * Model ) localChangeDetected ( folder , path string , files [ ] protocol . FileInfo ) {
2016-05-19 00:19:26 +00:00
// For windows paths, strip unwanted chars from the front
path = strings . Replace ( path , ` \\?\ ` , "" , 1 )
for _ , file := range files {
objType := "file"
2016-05-19 07:01:43 +00:00
action := "modified"
2016-05-19 00:19:26 +00:00
// If our local vector is verison 1 AND it is the only version vector so far seen for this file then
// it is a new file. Else if it is > 1 it's not new, and if it is 1 but another shortId version vector
// exists then it is new for us but created elsewhere so the file is still not new but modified by us.
// Only if it is truly new do we change this to 'added', else we leave it as 'modified'.
if len ( file . Version ) == 1 && file . Version [ 0 ] . Value == 1 {
2016-05-19 07:01:43 +00:00
action = "added"
2016-05-19 00:19:26 +00:00
}
if file . IsDirectory ( ) {
objType = "dir"
}
if file . IsDeleted ( ) {
2016-05-19 07:01:43 +00:00
action = "deleted"
2016-05-19 00:19:26 +00:00
}
// If the file is a level or more deep then the forward slash seperator is embedded
// in the filename and makes the path look wierd on windows, so lets fix it
filename := filepath . FromSlash ( file . Name )
// And append it to the filepath
path := filepath . Join ( path , filename )
2016-05-19 07:01:43 +00:00
events . Default . Log ( events . LocalChangeDetected , map [ string ] string {
"folder" : folder ,
2016-05-19 00:19:26 +00:00
"action" : action ,
"type" : objType ,
"path" : path ,
} )
}
2014-03-28 14:36:57 +01:00
}
2016-04-15 10:59:41 +00:00
func ( m * Model ) requestGlobal ( deviceID protocol . DeviceID , folder , name string , offset int64 , size int , hash [ ] byte , fromTemporary bool ) ( [ ] byte , error ) {
2014-01-17 20:06:44 -07:00
m . pmut . RLock ( )
2015-06-28 16:05:29 +01:00
nc , ok := m . conn [ deviceID ]
2014-01-17 20:06:44 -07:00
m . pmut . RUnlock ( )
2014-01-06 11:11:18 +01:00
if ! ok {
2014-09-28 12:00:38 +01:00
return nil , fmt . Errorf ( "requestGlobal: no such device: %s" , deviceID )
2014-01-06 11:11:18 +01:00
}
2016-04-15 10:59:41 +00:00
l . Debugf ( "%v REQ(out): %s: %q / %q o=%d s=%d h=%x ft=%t op=%s" , m , deviceID , folder , name , offset , size , hash , fromTemporary )
2014-01-06 11:11:18 +01:00
2016-04-15 10:59:41 +00:00
return nc . Request ( folder , name , offset , size , hash , fromTemporary )
2014-01-06 11:11:18 +01:00
}
2014-09-28 12:00:38 +01:00
func ( m * Model ) AddFolder ( cfg config . FolderConfiguration ) {
2014-05-23 14:31:16 +02:00
if len ( cfg . ID ) == 0 {
2014-09-28 12:00:38 +01:00
panic ( "cannot add empty folder id" )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . Lock ( )
2014-09-28 12:00:38 +01:00
m . folderCfgs [ cfg . ID ] = cfg
2015-01-12 14:52:24 +01:00
m . folderFiles [ cfg . ID ] = db . NewFileSet ( cfg . ID , m . db )
2013-12-15 11:43:31 +01:00
2014-09-28 12:00:38 +01:00
m . folderDevices [ cfg . ID ] = make ( [ ] protocol . DeviceID , len ( cfg . Devices ) )
for i , device := range cfg . Devices {
m . folderDevices [ cfg . ID ] [ i ] = device . DeviceID
m . deviceFolders [ device . DeviceID ] = append ( m . deviceFolders [ device . DeviceID ] , cfg . ID )
2014-03-29 18:53:48 +01:00
}
2014-01-23 22:20:15 +01:00
2015-07-23 16:13:53 +02:00
ignores := ignore . New ( m . cacheIgnoredFiles )
2015-09-29 18:01:19 +02:00
if err := ignores . Load ( filepath . Join ( cfg . Path ( ) , ".stignore" ) ) ; err != nil && ! os . IsNotExist ( err ) {
l . Warnln ( "Loading ignores:" , err )
}
2014-11-22 02:19:16 +00:00
m . folderIgnores [ cfg . ID ] = ignores
2014-09-28 12:39:39 +01:00
m . fmut . Unlock ( )
2014-03-29 18:53:48 +01:00
}
2014-01-23 22:20:15 +01:00
2015-02-11 19:52:59 +01:00
func ( m * Model ) ScanFolders ( ) map [ string ] error {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-13 05:12:01 +09:00
folders := make ( [ ] string , 0 , len ( m . folderCfgs ) )
2014-09-28 12:00:38 +01:00
for folder := range m . folderCfgs {
folders = append ( folders , folder )
2014-03-29 18:53:48 +01:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
errors := make ( map [ string ] error , len ( m . folderCfgs ) )
2015-04-22 23:54:31 +01:00
errorsMut := sync . NewMutex ( )
2015-02-11 19:52:59 +01:00
2015-04-22 23:54:31 +01:00
wg := sync . NewWaitGroup ( )
2014-09-28 12:00:38 +01:00
wg . Add ( len ( folders ) )
for _ , folder := range folders {
folder := folder
2014-05-13 20:42:12 -03:00
go func ( ) {
2014-09-28 12:00:38 +01:00
err := m . ScanFolder ( folder )
2014-05-28 06:55:30 +02:00
if err != nil {
2015-02-11 19:52:59 +01:00
errorsMut . Lock ( )
errors [ folder ] = err
errorsMut . Unlock ( )
2015-04-13 05:12:01 +09:00
2015-03-28 14:25:42 +00:00
// Potentially sets the error twice, once in the scanner just
// by doing a check, and once here, if the error returned is
// the same one as returned by CheckFolderHealth, though
2015-04-13 05:12:01 +09:00
// duplicate set is handled by setError.
m . fmut . RLock ( )
srv := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
srv . setError ( err )
2014-05-28 06:55:30 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Done ( )
} ( )
2014-04-14 09:58:17 +02:00
}
2014-05-13 20:42:12 -03:00
wg . Wait ( )
2015-02-11 19:52:59 +01:00
return errors
2014-03-29 18:53:48 +01:00
}
2013-12-15 11:43:31 +01:00
2014-09-28 12:00:38 +01:00
func ( m * Model ) ScanFolder ( folder string ) error {
2015-03-27 09:51:18 +01:00
return m . ScanFolderSubs ( folder , nil )
2014-08-11 20:20:01 +02:00
}
2015-03-27 09:51:18 +01:00
func ( m * Model ) ScanFolderSubs ( folder string , subs [ ] string ) error {
2015-06-20 19:26:25 +02:00
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
// Folders are added to folderRunners only when they are started. We can't
// scan them before they have started, so that's what we need to check for
// here.
if ! ok {
return errors . New ( "no such folder" )
}
return runner . Scan ( subs )
}
2016-04-26 14:01:46 +00:00
// internalScanFolderSubdirs performs the actual scan of the given
// subdirectories of a folder: it walks the filesystem for new or changed
// files, pushing updates to the index in batches, then sweeps the database
// under the same prefixes to detect files that were deleted or have become
// ignored. An empty subs list scans the whole folder. Folder health is
// re-checked between batches so a folder that goes bad mid-scan is stopped
// promptly.
func (m *Model) internalScanFolderSubdirs(folder string, subs []string) error {
	// Normalize the subpaths to native filename format and refuse any that
	// would escape the folder when joined and cleaned.
	for i, sub := range subs {
		sub = osutil.NativeFilename(sub)
		if p := filepath.Clean(filepath.Join(folder, sub)); !strings.HasPrefix(p, folder) {
			return errors.New("invalid subpath")
		}
		subs[i] = sub
	}

	m.fmut.Lock()
	fs := m.folderFiles[folder]
	folderCfg := m.folderCfgs[folder]
	ignores := m.folderIgnores[folder]
	runner, ok := m.folderRunners[folder]
	m.fmut.Unlock()

	// Folders are added to folderRunners only when they are started. We can't
	// scan them before they have started, so that's what we need to check for
	// here.
	if !ok {
		return errors.New("no such folder")
	}

	if err := m.CheckFolderHealth(folder); err != nil {
		runner.setError(err)
		l.Infof("Stopping folder %s due to error: %s", folder, err)
		return err
	}

	// A missing .stignore is fine; any other load error stops the folder.
	if err := ignores.Load(filepath.Join(folderCfg.Path(), ".stignore")); err != nil && !os.IsNotExist(err) {
		err = fmt.Errorf("loading ignores: %v", err)
		runner.setError(err)
		l.Infof("Stopping folder %s due to error: %s", folder, err)
		return err
	}

	// Clean the list of subitems to ensure that we start at a known
	// directory, and don't scan subdirectories of things we've already
	// scanned.
	subs = unifySubs(subs, func(f string) bool {
		_, ok := fs.Get(protocol.LocalDeviceID, f)
		return ok
	})

	// The cancel channel is closed whenever we return (such as from an error),
	// to signal the potentially still running walker to stop.
	cancel := make(chan struct{})
	defer close(cancel)

	runner.setState(FolderScanning)

	fchan, err := scanner.Walk(scanner.Config{
		Folder:                folderCfg.ID,
		Dir:                   folderCfg.Path(),
		Subs:                  subs,
		Matcher:               ignores,
		BlockSize:             protocol.BlockSize,
		TempNamer:             defTempNamer,
		TempLifetime:          time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
		CurrentFiler:          cFiler{m, folder},
		MtimeRepo:             db.NewVirtualMtimeRepo(m.db, folderCfg.ID),
		IgnorePerms:           folderCfg.IgnorePerms,
		AutoNormalize:         folderCfg.AutoNormalize,
		Hashers:               m.numHashers(folder),
		ShortID:               m.shortID,
		ProgressTickIntervalS: folderCfg.ScanProgressIntervalS,
		Cancel:                cancel,
	})

	if err != nil {
		// The error we get here is likely an OS level error, which might not be
		// as readable as our health check errors. Check if we can get a health
		// check error first, and use that if it's available.
		if ferr := m.CheckFolderHealth(folder); ferr != nil {
			err = ferr
		}
		runner.setError(err)
		return err
	}

	// Batch index updates by file count and by total block count so that
	// neither very many small files nor a few huge files produce oversized
	// updates.
	batchSizeFiles := 100
	batchSizeBlocks := 2048 // about 256 MB

	batch := make([]protocol.FileInfo, 0, batchSizeFiles)
	blocksHandled := 0

	for f := range fchan {
		if len(batch) == batchSizeFiles || blocksHandled > batchSizeBlocks {
			if err := m.CheckFolderHealth(folder); err != nil {
				l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
				return err
			}
			m.updateLocalsFromScanning(folder, batch)
			batch = batch[:0]
			blocksHandled = 0
		}
		batch = append(batch, f)
		blocksHandled += len(f.Blocks)
	}

	if err := m.CheckFolderHealth(folder); err != nil {
		l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
		return err
	} else if len(batch) > 0 {
		m.updateLocalsFromScanning(folder, batch)
	}

	if len(subs) == 0 {
		// If we have no specific subdirectories to traverse, set it to one
		// empty prefix so we traverse the entire folder contents once.
		subs = []string{""}
	}

	// Do a scan of the database for each prefix, to check for deleted files.
	batch = batch[:0]
	for _, sub := range subs {
		var iterError error

		fs.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
			f := fi.(db.FileInfoTruncated)
			if !f.IsDeleted() {
				if len(batch) == batchSizeFiles {
					if err := m.CheckFolderHealth(folder); err != nil {
						// Remember the health error and stop iterating; it
						// is reported after the iteration below.
						iterError = err
						return false
					}
					m.updateLocalsFromScanning(folder, batch)
					batch = batch[:0]
				}

				if ignores.Match(f.Name).IsIgnored() || symlinkInvalid(folder, f) {
					// File has been ignored or an unsupported symlink. Set invalid bit.
					l.Debugln("setting invalid bit on ignored", f)
					nf := protocol.FileInfo{
						Name:     f.Name,
						Flags:    f.Flags | protocol.FlagInvalid,
						Modified: f.Modified,
						Version:  f.Version, // The file is still the same, so don't bump version
					}
					batch = append(batch, nf)
				} else if _, err := osutil.Lstat(filepath.Join(folderCfg.Path(), f.Name)); err != nil {
					// File has been deleted.

					// We don't specifically verify that the error is
					// os.IsNotExist because there is a corner case when a
					// directory is suddenly transformed into a file. When that
					// happens, files that were in the directory (that is now a
					// file) are deleted but will return a confusing error ("not a
					// directory") when we try to Lstat() them.

					nf := protocol.FileInfo{
						Name:     f.Name,
						Flags:    f.Flags | protocol.FlagDeleted,
						Modified: f.Modified,
						Version:  f.Version.Update(m.shortID),
					}

					// The deleted file might have been ignored at some
					// point, but it currently isn't so we make sure to
					// clear the invalid bit.
					nf.Flags &^= protocol.FlagInvalid

					batch = append(batch, nf)
				}
			}
			return true
		})

		if iterError != nil {
			l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, iterError)
			return iterError
		}
	}

	if err := m.CheckFolderHealth(folder); err != nil {
		l.Infof("Stopping folder %s mid-scan due to folder error: %s", folder, err)
		return err
	} else if len(batch) > 0 {
		m.updateLocalsFromScanning(folder, batch)
	}

	runner.setState(FolderIdle)
	return nil
}
2015-05-01 14:30:17 +02:00
// DelayScan requests that the next periodic scan of the given folder is
// postponed by the given duration. Unknown folders are silently ignored.
func (m *Model) DelayScan(folder string, next time.Duration) {
	m.fmut.Lock()
	r, ok := m.folderRunners[folder]
	m.fmut.Unlock()
	if !ok {
		return
	}
	r.DelayScan(next)
}
2015-04-29 20:46:32 +02:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
func ( m * Model ) numHashers ( folder string ) int {
m . fmut . Lock ( )
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
m . fmut . Unlock ( )
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 10:05:06 +02:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, lets try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 20:46:32 +02:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
2015-11-17 12:08:53 +01:00
// generateClusterConfig returns a ClusterConfigMessage that is correct for
// the given peer device
func ( m * Model ) generateClusterConfig ( device protocol . DeviceID ) protocol . ClusterConfigMessage {
2016-03-25 20:29:07 +00:00
var message protocol . ClusterConfigMessage
2014-04-13 15:28:26 +02:00
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
for _ , folder := range m . deviceFolders [ device ] {
2015-09-27 12:11:34 +01:00
folderCfg := m . cfg . Folders ( ) [ folder ]
2016-03-11 09:48:46 +00:00
protocolFolder := protocol . Folder {
ID : folder ,
Label : folderCfg . Label ,
2014-01-09 13:58:35 +01:00
}
2015-09-27 12:11:34 +01:00
var flags uint32
2016-05-04 10:47:33 +00:00
if folderCfg . Type == config . FolderTypeReadOnly {
2015-09-27 12:11:34 +01:00
flags |= protocol . FlagFolderReadOnly
}
if folderCfg . IgnorePerms {
flags |= protocol . FlagFolderIgnorePerms
}
if folderCfg . IgnoreDelete {
flags |= protocol . FlagFolderIgnoreDelete
}
2016-04-15 10:59:41 +00:00
if folderCfg . DisableTempIndexes {
flags |= protocol . FlagFolderDisabledTempIndexes
}
2016-03-11 09:48:46 +00:00
protocolFolder . Flags = flags
2014-09-28 12:00:38 +01:00
for _ , device := range m . folderDevices [ folder ] {
// DeviceID is a value type, but with an underlying array. Copy it
// so we don't grab aliases to the same array later on in device[:]
device := device
2015-09-27 11:39:02 +01:00
// TODO: Set read only bit when relevant, and when we have per device
// access controls.
deviceCfg := m . cfg . Devices ( ) [ device ]
2016-03-11 09:48:46 +00:00
protocolDevice := protocol . Device {
2015-09-27 11:39:02 +01:00
ID : device [ : ] ,
Name : deviceCfg . Name ,
Addresses : deviceCfg . Addresses ,
Compression : uint32 ( deviceCfg . Compression ) ,
CertName : deviceCfg . CertName ,
Flags : protocol . FlagShareTrusted ,
2014-09-23 16:04:20 +02:00
}
2015-09-27 11:39:02 +01:00
if deviceCfg . Introducer {
2016-03-11 09:48:46 +00:00
protocolDevice . Flags |= protocol . FlagIntroducer
2014-09-23 16:04:20 +02:00
}
2016-03-11 09:48:46 +00:00
protocolFolder . Devices = append ( protocolFolder . Devices , protocolDevice )
2014-01-09 13:58:35 +01:00
}
2016-03-11 09:48:46 +00:00
message . Folders = append ( message . Folders , protocolFolder )
2013-12-29 20:33:57 -05:00
}
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2014-04-13 15:28:26 +02:00
2016-03-11 09:48:46 +00:00
return message
2013-12-29 20:33:57 -05:00
}
2014-04-14 09:58:17 +02:00
2015-04-13 05:12:01 +09:00
func ( m * Model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 21:14:19 +01:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-13 05:12:01 +09:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 21:14:19 +01:00
}
2015-04-13 05:12:01 +09:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 09:58:17 +02:00
}
2014-06-16 10:47:02 +02:00
2014-09-28 12:00:38 +01:00
func ( m * Model ) Override ( folder string ) {
2014-09-28 12:39:39 +01:00
m . fmut . RLock ( )
2015-04-18 22:41:47 +09:00
fs , ok := m . folderFiles [ folder ]
2015-03-16 21:14:19 +01:00
runner := m . folderRunners [ folder ]
2014-09-28 12:39:39 +01:00
m . fmut . RUnlock ( )
2015-04-18 22:41:47 +09:00
if ! ok {
return
}
2014-06-23 11:52:13 +02:00
2015-03-16 21:14:19 +01:00
runner . setState ( FolderScanning )
2014-07-15 17:54:00 +02:00
batch := make ( [ ] protocol . FileInfo , 0 , indexBatchSize )
2015-01-12 14:50:30 +01:00
fs . WithNeed ( protocol . LocalDeviceID , func ( fi db . FileIntf ) bool {
2014-08-12 13:53:31 +02:00
need := fi . ( protocol . FileInfo )
2014-07-15 17:54:00 +02:00
if len ( batch ) == indexBatchSize {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-07-15 17:54:00 +02:00
batch = batch [ : 0 ]
}
2015-01-06 22:12:45 +01:00
have , ok := fs . Get ( protocol . LocalDeviceID , need . Name )
if ! ok || have . Name != need . Name {
2014-06-16 10:47:02 +02:00
// We are missing the file
2014-07-15 17:54:00 +02:00
need . Flags |= protocol . FlagDeleted
need . Blocks = nil
2015-04-02 10:21:11 +02:00
need . Version = need . Version . Update ( m . shortID )
2014-06-16 10:47:02 +02:00
} else {
// We have the file, replace with our version
2015-04-02 10:21:11 +02:00
have . Version = have . Version . Merge ( need . Version ) . Update ( m . shortID )
2014-07-15 17:54:00 +02:00
need = have
2014-06-16 10:47:02 +02:00
}
2014-07-15 17:54:00 +02:00
need . LocalVersion = 0
batch = append ( batch , need )
return true
} )
if len ( batch ) > 0 {
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( folder , batch )
2014-06-16 10:47:02 +02:00
}
2015-03-16 21:14:19 +01:00
runner . setState ( FolderIdle )
2014-06-16 10:47:02 +02:00
}
2014-06-20 00:27:54 +02:00
2014-09-28 12:00:38 +01:00
// CurrentLocalVersion returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder has
2014-09-27 14:44:15 +02:00
// changed.
2015-06-24 08:52:38 +01:00
func ( m * Model ) CurrentLocalVersion ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
2014-10-12 10:36:04 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-09-27 14:44:15 +02:00
}
2015-06-24 08:52:38 +01:00
return fs . LocalVersion ( protocol . LocalDeviceID ) , true
2014-09-27 14:44:15 +02:00
}
2014-09-28 12:00:38 +01:00
// RemoteLocalVersion returns the change version for the given folder, as
2014-09-27 14:44:15 +02:00
// sent by remote peers. This is guaranteed to increment if the contents of
2014-09-28 12:00:38 +01:00
// the remote or global folder has changed.
2015-06-24 08:52:38 +01:00
func ( m * Model ) RemoteLocalVersion ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-07-15 17:54:00 +02:00
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2014-07-15 17:54:00 +02:00
if ! ok {
2014-10-24 14:54:36 +02:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 08:52:38 +01:00
return 0 , false
2014-07-15 17:54:00 +02:00
}
2015-01-18 02:12:06 +01:00
var ver int64
2014-09-28 12:00:38 +01:00
for _ , n := range m . folderDevices [ folder ] {
2014-07-15 17:54:00 +02:00
ver += fs . LocalVersion ( n )
2014-06-20 00:27:54 +02:00
}
2015-06-24 08:52:38 +01:00
return ver , true
2014-06-20 00:27:54 +02:00
}
2014-09-27 14:44:15 +02:00
2015-02-07 10:52:42 +00:00
// GlobalDirectoryTree returns the global directory structure below prefix
// as nested maps: directories map to sub-maps, and files map to a two
// element list of [modified time, size]. levels limits the depth below the
// prefix (-1 means unlimited) and dirsonly omits file entries. Returns nil
// for an unknown folder.
func (m *Model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
	m.fmut.RLock()
	files, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return nil
	}

	output := make(map[string]interface{})
	sep := string(filepath.Separator)
	prefix = osutil.NativeFilename(prefix)

	// Ensure the prefix ends in a separator so stripping it below leaves
	// clean relative names.
	if prefix != "" && !strings.HasSuffix(prefix, sep) {
		prefix = prefix + sep
	}

	files.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
		f := fi.(db.FileInfoTruncated)

		// Skip invalid and deleted entries, and the prefix entry itself.
		if f.IsInvalid() || f.IsDeleted() || f.Name == prefix {
			return true
		}

		f.Name = strings.Replace(f.Name, prefix, "", 1)

		var dir, base string
		if f.IsDirectory() && !f.IsSymlink() {
			dir = f.Name
		} else {
			dir = filepath.Dir(f.Name)
			base = filepath.Base(f.Name)
		}

		// Enforce the depth limit, counting separators below the prefix.
		if levels > -1 && strings.Count(f.Name, sep) > levels {
			return true
		}

		// Walk (and create as needed) the nested maps down to the entry's
		// parent directory.
		last := output
		if dir != "." {
			for _, path := range strings.Split(dir, sep) {
				directory, ok := last[path]
				if !ok {
					newdir := make(map[string]interface{})
					last[path] = newdir
					last = newdir
				} else {
					last = directory.(map[string]interface{})
				}
			}
		}

		if !dirsonly && base != "" {
			last[base] = []interface{}{
				time.Unix(f.Modified, 0), f.Size(),
			}
		}

		return true
	})

	return output
}
2016-04-15 10:59:41 +00:00
func ( m * Model ) Availability ( folder , file string , version protocol . Vector , block protocol . BlockInfo ) [ ] Availability {
2014-10-31 23:41:18 +00:00
// Acquire this lock first, as the value returned from foldersFiles can
2014-12-28 23:11:32 +00:00
// get heavily modified on Close()
2014-10-31 23:41:18 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 12:00:38 +01:00
fs , ok := m . folderFiles [ folder ]
2016-04-15 10:59:41 +00:00
devices := m . folderDevices [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 14:44:15 +02:00
if ! ok {
return nil
}
2016-04-15 10:59:41 +00:00
var availabilities [ ] Availability
2014-10-31 23:41:18 +00:00
for _ , device := range fs . Availability ( file ) {
2015-06-28 16:05:29 +01:00
_ , ok := m . conn [ device ]
2014-10-31 23:41:18 +00:00
if ok {
2016-04-15 10:59:41 +00:00
availabilities = append ( availabilities , Availability { ID : device , FromTemporary : false } )
2014-10-31 23:41:18 +00:00
}
}
2016-04-15 10:59:41 +00:00
for _ , device := range devices {
if m . deviceDownloads [ device ] . Has ( folder , file , version , int32 ( block . Offset / protocol . BlockSize ) ) {
availabilities = append ( availabilities , Availability { ID : device , FromTemporary : true } )
}
}
return availabilities
2014-09-27 14:44:15 +02:00
}
2015-04-28 22:32:10 +02:00
// BringToFront bumps the given files priority in the job queue.
2014-12-30 09:35:21 +01:00
func ( m * Model ) BringToFront ( folder , file string ) {
2014-12-01 19:23:06 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
runner , ok := m . folderRunners [ folder ]
if ok {
2014-12-30 09:35:21 +01:00
runner . BringToFront ( file )
2014-12-01 19:23:06 +00:00
}
}
2015-04-28 22:32:10 +02:00
// CheckFolderHealth checks the folder for common errors and returns the
// current folder error, or nil if the folder is healthy. As a side effect
// it sets or clears the error on the folder's runner (when one exists) and
// logs transitions between error states.
func (m *Model) CheckFolderHealth(id string) error {
	folder, ok := m.cfg.Folders()[id]
	if !ok {
		return errors.New("folder does not exist")
	}

	// Check free space on the home disk (where the database lives), if a
	// minimum has been configured.
	if minFree := m.cfg.Options().MinHomeDiskFreePct; minFree > 0 {
		if free, err := osutil.DiskFreePercentage(m.cfg.ConfigPath()); err == nil && free < minFree {
			return errors.New("home disk has insufficient free space")
		}
	}

	fi, err := os.Stat(folder.Path())

	v, ok := m.CurrentLocalVersion(id)
	indexHasFiles := ok && v > 0

	if indexHasFiles {
		// There are files in the folder according to the index, so it must
		// have existed and had a correct marker at some point. Verify that
		// this is still the case.
		switch {
		case err != nil || !fi.IsDir():
			err = errors.New("folder path missing")

		case !folder.HasMarker():
			err = errors.New("folder marker missing")

		case folder.Type != config.FolderTypeReadOnly:
			// Check for free space, if it isn't a master folder. We aren't
			// going to change the contents of master folders, so we don't
			// care about the amount of free space there.
			diskFreeP, errDfp := osutil.DiskFreePercentage(folder.Path())
			if errDfp == nil && diskFreeP < folder.MinDiskFreePct {
				diskFreeBytes, _ := osutil.DiskFreeBytes(folder.Path())
				str := fmt.Sprintf("insufficient free space (%d MiB, %.2f%%)", diskFreeBytes/1024/1024, diskFreeP)
				err = errors.New(str)
			}
		}
	} else {
		// It's a blank folder, so this may the first time we're looking at
		// it. Attempt to create and tag with our marker as appropriate.
		if os.IsNotExist(err) {
			err = osutil.MkdirAll(folder.Path(), 0700)
		}
		if err == nil && !folder.HasMarker() {
			err = folder.CreateMarker()
		}
	}

	m.fmut.RLock()
	runner, runnerExists := m.folderRunners[folder.ID]
	m.fmut.RUnlock()

	// Fetch the previous error (if any) so we can log only actual state
	// transitions below.
	var oldErr error
	if runnerExists {
		_, _, oldErr = runner.getState()
	}

	if err != nil {
		if oldErr != nil && oldErr.Error() != err.Error() {
			l.Infof("Folder %q error changed: %q -> %q", folder.ID, oldErr, err)
		} else if oldErr == nil {
			l.Warnf("Stopping folder %q - %v", folder.ID, err)
		}
		if runnerExists {
			runner.setError(err)
		}
	} else if oldErr != nil {
		l.Infof("Folder %q error is cleared, restarting", folder.ID)
		if runnerExists {
			runner.clearError()
		}
	}

	return err
}
2015-06-21 09:35:41 +02:00
// ResetFolder drops all database entries for the given folder. The next
// scan repopulates the index from scratch.
func (m *Model) ResetFolder(folder string) {
	l.Infof("Cleaning data for folder %q", folder)
	db.DropFolder(m.db, folder)
}
2014-09-27 14:44:15 +02:00
// String returns a printable identity for the model, used in debug logging.
func (m *Model) String() string {
	return fmt.Sprintf("model@%p", m)
}
2014-10-13 14:43:01 +02:00
2015-06-03 09:47:39 +02:00
// VerifyConfiguration implements the config.Committer interface. The model
// accepts any configuration change, so it always returns nil.
func (m *Model) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}
func ( m * Model ) CommitConfiguration ( from , to config . Configuration ) bool {
// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.
2015-07-22 09:02:55 +02:00
// Go through the folder configs and figure out if we need to restart or not.
fromFolders := mapFolders ( from . Folders )
toFolders := mapFolders ( to . Folders )
2015-07-23 16:13:53 +02:00
for folderID , cfg := range toFolders {
2015-07-22 09:02:55 +02:00
if _ , ok := fromFolders [ folderID ] ; ! ok {
2015-07-23 16:13:53 +02:00
// A folder was added.
2015-10-03 17:25:21 +02:00
l . Debugln ( m , "adding folder" , folderID )
2015-07-23 16:13:53 +02:00
m . AddFolder ( cfg )
2016-05-04 10:47:33 +00:00
m . StartFolder ( folderID )
2015-07-23 16:13:53 +02:00
// Drop connections to all devices that can now share the new
// folder.
m . pmut . Lock ( )
for _ , dev := range cfg . DeviceIDs ( ) {
2015-06-28 16:05:29 +01:00
if conn , ok := m . conn [ dev ] ; ok {
2015-07-23 16:13:53 +02:00
closeRawConn ( conn )
}
2015-07-22 09:02:55 +02:00
}
2015-07-23 16:13:53 +02:00
m . pmut . Unlock ( )
2015-07-22 09:02:55 +02:00
}
2015-06-03 09:47:39 +02:00
}
2015-07-22 09:02:55 +02:00
for folderID , fromCfg := range fromFolders {
toCfg , ok := toFolders [ folderID ]
if ! ok {
2015-11-13 13:30:52 +01:00
// The folder was removed.
m . RemoveFolder ( folderID )
continue
2015-07-22 09:02:55 +02:00
}
// This folder exists on both sides. Compare the device lists, as we
// can handle adding a device (but not currently removing one).
fromDevs := mapDevices ( fromCfg . DeviceIDs ( ) )
toDevs := mapDevices ( toCfg . DeviceIDs ( ) )
for dev := range fromDevs {
if _ , ok := toDevs [ dev ] ; ! ok {
// A device was removed. Requires restart.
2015-10-03 17:25:21 +02:00
l . Debugln ( m , "requires restart, removing device" , dev , "from folder" , folderID )
2015-07-22 09:02:55 +02:00
return false
}
}
for dev := range toDevs {
if _ , ok := fromDevs [ dev ] ; ! ok {
// A device was added. Handle it!
m . fmut . Lock ( )
m . pmut . Lock ( )
m . folderCfgs [ folderID ] = toCfg
m . folderDevices [ folderID ] = append ( m . folderDevices [ folderID ] , dev )
m . deviceFolders [ dev ] = append ( m . deviceFolders [ dev ] , folderID )
// If we already have a connection to this device, we should
// disconnect it so that we start sharing the folder with it.
// We close the underlying connection and let the normal error
// handling kick in to clean up and reconnect.
2015-06-28 16:05:29 +01:00
if conn , ok := m . conn [ dev ] ; ok {
2015-07-22 09:02:55 +02:00
closeRawConn ( conn )
}
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
}
2016-03-16 12:16:38 +01:00
// Check if anything else differs, apart from the device list and label.
2015-07-22 09:02:55 +02:00
fromCfg . Devices = nil
toCfg . Devices = nil
2016-03-16 12:16:38 +01:00
fromCfg . Label = ""
toCfg . Label = ""
2015-07-22 09:02:55 +02:00
if ! reflect . DeepEqual ( fromCfg , toCfg ) {
2015-10-03 17:25:21 +02:00
l . Debugln ( m , "requires restart, folder" , folderID , "configuration differs" )
2015-07-22 09:02:55 +02:00
return false
}
2015-06-03 09:47:39 +02:00
}
2015-07-22 09:02:55 +02:00
2015-11-11 21:20:34 -05:00
// Removing a device requires restart
2015-07-22 09:02:55 +02:00
toDevs := mapDeviceCfgs ( from . Devices )
2015-06-03 09:47:39 +02:00
for _ , dev := range from . Devices {
if _ , ok := toDevs [ dev . DeviceID ] ; ! ok {
2015-10-03 17:25:21 +02:00
l . Debugln ( m , "requires restart, device" , dev . DeviceID , "was removed" )
2015-06-03 09:47:39 +02:00
return false
}
}
2016-01-18 10:06:31 -08:00
// Some options don't require restart as those components handle it fine
// by themselves.
from . Options . URAccepted = to . Options . URAccepted
from . Options . URUniqueID = to . Options . URUniqueID
2016-05-09 11:30:19 +00:00
from . Options . ListenAddresses = to . Options . ListenAddresses
2016-05-17 00:05:38 +00:00
from . Options . RelaysEnabled = to . Options . RelaysEnabled
2016-01-18 10:06:31 -08:00
// All of the other generic options require restart. Or at least they may;
// removing this check requires going through those options carefully and
// making sure there are individual services that handle them correctly.
// This code is the "original" requires-restart check and protects other
// components that haven't yet been converted to VerifyConfig/CommitConfig
// handling.
2015-06-03 09:47:39 +02:00
if ! reflect . DeepEqual ( from . Options , to . Options ) {
2015-10-03 17:25:21 +02:00
l . Debugln ( m , "requires restart, options differ" )
2015-06-03 09:47:39 +02:00
return false
}
return true
}
2015-07-22 09:02:55 +02:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders(folders []config.FolderConfiguration) map[string]config.FolderConfiguration {
	out := make(map[string]config.FolderConfiguration, len(folders))
	for _, fcfg := range folders {
		out[fcfg.ID] = fcfg
	}
	return out
}
// mapDevices returns a set (map to empty struct) of the given device IDs,
// for fast membership tests.
func mapDevices(devices []protocol.DeviceID) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for _, id := range devices {
		set[id] = struct{}{}
	}
	return set
}
// mapDeviceCfgs returns a set (map to empty struct) of the device IDs in
// the given device configurations, for fast membership tests.
func mapDeviceCfgs(devices []config.DeviceConfiguration) map[protocol.DeviceID]struct{} {
	set := make(map[protocol.DeviceID]struct{}, len(devices))
	for _, cfg := range devices {
		set[cfg.DeviceID] = struct{}{}
	}
	return set
}
2016-04-18 18:35:31 +00:00
func filterIndex ( folder string , fs [ ] protocol . FileInfo , dropDeletes bool , ignores * ignore . Matcher ) [ ] protocol . FileInfo {
2015-07-21 13:14:33 +02:00
for i := 0 ; i < len ( fs ) ; {
if fs [ i ] . Flags &^ protocol . FlagsAll != 0 {
2015-10-03 17:25:21 +02:00
l . Debugln ( "dropping update for file with unknown bits set" , fs [ i ] )
2015-07-21 13:14:33 +02:00
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
} else if fs [ i ] . IsDeleted ( ) && dropDeletes {
2015-10-03 17:25:21 +02:00
l . Debugln ( "dropping update for undesired delete" , fs [ i ] )
2015-07-21 13:14:33 +02:00
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
} else if symlinkInvalid ( folder , fs [ i ] ) {
2015-10-03 17:25:21 +02:00
l . Debugln ( "dropping update for unsupported symlink" , fs [ i ] )
2015-07-21 13:14:33 +02:00
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
2016-04-18 18:35:31 +00:00
} else if ignores != nil && ignores . Match ( fs [ i ] . Name ) . IsIgnored ( ) {
l . Debugln ( "dropping update for ignored item" , fs [ i ] )
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
2015-07-21 13:14:33 +02:00
} else {
i ++
}
}
return fs
}
2015-06-15 00:44:24 +02:00
func symlinkInvalid ( folder string , fi db . FileIntf ) bool {
if ! symlinks . Supported && fi . IsSymlink ( ) && ! fi . IsInvalid ( ) && ! fi . IsDeleted ( ) {
symlinkWarning . Do ( func ( ) {
2015-04-28 18:34:55 +03:00
l . Warnln ( "Symlinks are disabled, unsupported or require Administrator privileges. This might cause your folder to appear out of sync." )
2014-11-09 04:26:52 +00:00
} )
2015-06-15 00:44:24 +02:00
// Need to type switch for the concrete type to be able to access fields...
var name string
switch fi := fi . ( type ) {
case protocol . FileInfo :
name = fi . Name
case db . FileInfoTruncated :
name = fi . Name
}
l . Infoln ( "Unsupported symlink" , name , "in folder" , folder )
2014-11-09 04:26:52 +00:00
return true
}
return false
}
2015-04-25 22:53:44 +01:00
// getChunk skips `skip` elements of data and then retrieves up to `get`
// elements. It returns the resulting slice, plus how many skips and how
// many gets remain unsatisfied when data is too short to cover them.
func getChunk(data []string, skip, get int) ([]string, int, int) {
	n := len(data)
	switch {
	case n <= skip:
		// Everything is consumed by the skip; nothing retrieved.
		return []string{}, skip - n, get
	case n < skip+get:
		// Partially satisfied: return what's left after the skip.
		return data[skip:], 0, get - (n - skip)
	default:
		return data[skip : skip+get], 0, 0
	}
}
2015-07-22 09:02:55 +02:00
// closeRawConn closes conn. For TLS connections it first arms a short
// write deadline so the close-notify alert cannot block indefinitely.
func closeRawConn(conn io.Closer) error {
	if tc, ok := conn.(*tls.Conn); ok {
		// If the underlying connection is a *tls.Conn, Close() does more
		// than it says on the tin. Specifically, it sends a TLS alert
		// message, which might block forever if the connection is dead
		// and we don't have a deadline set.
		tc.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
	}
	return conn.Close()
}
2015-11-13 13:30:52 +01:00
// stringSliceWithout returns ss with the first occurrence of s removed.
// The removal is performed in place, so the backing array of ss is
// modified; the slice is returned unchanged when s is absent.
func stringSliceWithout(ss []string, s string) []string {
	for i, v := range ss {
		if v == s {
			return append(ss[:i], ss[i+1:]...)
		}
	}
	return ss
}
2016-03-18 08:28:44 +00:00
2016-04-15 10:59:41 +00:00
// readOffsetIntoBuf opens file and fills buf with the bytes found at
// offset. An error is returned when the file cannot be opened or fewer
// than len(buf) bytes could be read at that offset.
func readOffsetIntoBuf(file string, offset int64, buf []byte) error {
	fd, err := os.Open(file)
	if err != nil {
		l.Debugln("readOffsetIntoBuf.Open", file, err)
		return err
	}
	defer fd.Close()

	if _, err := fd.ReadAt(buf, offset); err != nil {
		l.Debugln("readOffsetIntoBuf.ReadAt", file, err)
		return err
	}
	return nil
}
2016-04-09 11:25:06 +00:00
// The exists function is expected to return true for all known paths
// (excluding "" and ".")
2016-03-18 08:28:44 +00:00
func unifySubs ( dirs [ ] string , exists func ( dir string ) bool ) [ ] string {
2016-04-09 11:25:06 +00:00
subs := trimUntilParentKnown ( dirs , exists )
sort . Strings ( subs )
return simplifySortedPaths ( subs )
}
2016-03-18 08:28:44 +00:00
2016-04-09 11:25:06 +00:00
// trimUntilParentKnown walks each entry in dirs upwards until its parent
// is either the folder root (".") or a path for which exists returns true,
// and returns the trimmed entries. A nil result means the whole folder
// should be scanned instead.
func trimUntilParentKnown(dirs []string, exists func(dir string) bool) []string {
	var known []string
	for _, dir := range dirs {
		for dir != "" && dir != ".stfolder" && dir != ".stignore" {
			dir = filepath.Clean(dir)
			parent := filepath.Dir(dir)
			if parent == "." || exists(parent) {
				// The parent is anchored; this entry is usable as-is.
				break
			}
			dir = parent
			if dir == "." || dir == string(filepath.Separator) {
				// Shortcut. We are going to scan the full folder, so we can
				// just return an empty list of subs at this point.
				return nil
			}
		}
		if dir == "" {
			return nil
		}
		known = append(known, dir)
	}
	return known
}
2016-03-18 08:28:44 +00:00
2016-04-09 11:25:06 +00:00
// simplifySortedPaths removes entries that are equal to, or lie beneath,
// an earlier entry. The input must be sorted so that a parent sorts before
// its children.
func simplifySortedPaths(subs []string) []string {
	var out []string
	for _, sub := range subs {
		covered := false
		for _, kept := range out {
			if sub == kept || strings.HasPrefix(sub, kept+string(os.PathSeparator)) {
				covered = true
				break
			}
		}
		if !covered {
			out = append(out, sub)
		}
	}
	return out
}
2016-05-01 06:49:29 +00:00
// makeForgetUpdate takes an index update and constructs a download progress
// update causing to forget any progress for files which we've just been
// sent. Symlinks, directories and deletes carry no block progress and are
// skipped.
func makeForgetUpdate(files []protocol.FileInfo) []protocol.FileDownloadProgressUpdate {
	updates := make([]protocol.FileDownloadProgressUpdate, 0, len(files))
	for _, f := range files {
		if f.IsSymlink() || f.IsDirectory() || f.IsDeleted() {
			continue
		}
		updates = append(updates, protocol.FileDownloadProgressUpdate{
			Name:       f.Name,
			Version:    f.Version,
			UpdateType: protocol.UpdateTypeForget,
		})
	}
	return updates
}