diff --git a/README.md b/README.md index e4cfd5eef..6d0260ee4 100644 --- a/README.md +++ b/README.md @@ -587,7 +587,7 @@ Other notes: Each user can be mapped with a Google Cloud Storage bucket or a bucket virtual folder, this way the mapped bucket/virtual folder is exposed over SFTP/SCP. This backend is very similar to the S3 backend and it has the same limitations. -To connect SFTPGo to Google Cloud Storage you need a credentials file that you can obtain from the Google Cloud Console, take a look at the "Setting up authentication" section [here](https://cloud.google.com/storage/docs/reference/libraries) for details. +To connect SFTPGo to Google Cloud Storage you can use the Application Default Credentials (ADC) strategy to try to find your application's credentials automatically or you can explicitly provide a JSON credentials file that you can obtain from the Google Cloud Console, take a look [here](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application) for details. You can optionally specify a [storage class](https://cloud.google.com/storage/docs/storage-classes) too, leave blank to use the default storage class. @@ -618,29 +618,30 @@ Usage: sftpgo portable [flags] Flags: - -C, --advertise-credentials If the SFTP service is advertised via multicast DNS this flag allows to put username/password inside the advertised TXT record - -S, --advertise-service Advertise SFTP service using multicast DNS (default true) - -d, --directory string Path to the directory to serve. 
This can be an absolute path or a path relative to the current directory (default ".") - -f, --fs-provider int 0 means local filesystem, 1 Amazon S3 compatible, 2 Google Cloud Storage + -C, --advertise-credentials If the SFTP service is advertised via multicast DNS this flag allows to put username/password inside the advertised TXT record + -S, --advertise-service Advertise SFTP service using multicast DNS (default true) + -d, --directory string Path to the directory to serve. This can be an absolute path or a path relative to the current directory (default ".") + -f, --fs-provider int 0 means local filesystem, 1 Amazon S3 compatible, 2 Google Cloud Storage + --gcs-automatic-credentials int 0 means explicit credentials using a JSON credentials file, 1 automatic (default 1) --gcs-bucket string - --gcs-credentials-file string Google Cloud Storage JSON credentials file - --gcs-key-prefix string Allows to restrict access to the virtual folder identified by this prefix and its contents + --gcs-credentials-file string Google Cloud Storage JSON credentials file + --gcs-key-prefix string Allows to restrict access to the virtual folder identified by this prefix and its contents --gcs-storage-class string - -h, --help help for portable - -l, --log-file-path string Leave empty to disable logging - -p, --password string Leave empty to use an auto generated value - -g, --permissions strings User's permissions. "*" means any permission (default [list,download]) + -h, --help help for portable + -l, --log-file-path string Leave empty to disable logging + -p, --password string Leave empty to use an auto generated value + -g, --permissions strings User's permissions. 
"*" means any permission (default [list,download]) -k, --public-key strings --s3-access-key string --s3-access-secret string --s3-bucket string --s3-endpoint string - --s3-key-prefix string Allows to restrict access to the virtual folder identified by this prefix and its contents + --s3-key-prefix string Allows to restrict access to the virtual folder identified by this prefix and its contents --s3-region string --s3-storage-class string - -s, --sftpd-port int 0 means a random non privileged port - -c, --ssh-commands strings SSH commands to enable. "*" means any supported SSH command including scp (default [md5sum,sha1sum,cd,pwd]) - -u, --username string Leave empty to use an auto generated value + -s, --sftpd-port int 0 means a random non privileged port + -c, --ssh-commands strings SSH commands to enable. "*" means any supported SSH command including scp (default [md5sum,sha1sum,cd,pwd]) + -u, --username string Leave empty to use an auto generated value ``` In portable mode SFTPGo can advertise the SFTP service and, optionally, the credentials via multicast DNS, so there is a standard way to discover the service and to automatically connect to it. @@ -696,6 +697,7 @@ For each account the following properties can be configured: - `s3_key_prefix`, allows to restrict access to the virtual folder identified by this prefix and its contents - `gcs_bucket`, required for GCS filesystem - `gcs_credentials`, Google Cloud Storage JSON credentials base64 encoded +- `gcs_automatic_credentials`, integer. 
Set to 1 to use Application Default Credentials strategy or set to 0 to use explicit credentials via `gcs_credentials` - `gcs_storage_class` - `gcs_key_prefix`, allows to restrict access to the virtual folder identified by this prefix and its contents diff --git a/cmd/portable.go b/cmd/portable.go index 11c0d84ee..80869b318 100644 --- a/cmd/portable.go +++ b/cmd/portable.go @@ -35,6 +35,7 @@ var ( portableS3KeyPrefix string portableGCSBucket string portableGCSCredentialsFile string + portableGCSAutoCredentials int portableGCSStorageClass string portableGCSKeyPrefix string portableCmd = &cobra.Command{ @@ -48,12 +49,16 @@ Please take a look at the usage below to customize the serving parameters`, Run: func(cmd *cobra.Command, args []string) { portableDir := directoryToServe if !filepath.IsAbs(portableDir) { - portableDir, _ = filepath.Abs(portableDir) + if portableFsProvider == 0 { + portableDir, _ = filepath.Abs(portableDir) + } else { + portableDir = os.TempDir() + } } permissions := make(map[string][]string) permissions["/"] = portablePermissions portableGCSCredentials := "" - if portableFsProvider == 2 { + if portableFsProvider == 2 && len(portableGCSCredentialsFile) > 0 { fi, err := os.Stat(portableGCSCredentialsFile) if err != nil { fmt.Printf("Invalid GCS credentials file: %v\n", err) @@ -69,6 +74,7 @@ Please take a look at the usage below to customize the serving parameters`, fmt.Printf("Unable to read credentials file: %v\n", err) } portableGCSCredentials = base64.StdEncoding.EncodeToString(creds) + portableGCSAutoCredentials = 0 } service := service.Service{ ConfigDir: defaultConfigDir, @@ -100,10 +106,11 @@ Please take a look at the usage below to customize the serving parameters`, KeyPrefix: portableS3KeyPrefix, }, GCSConfig: vfs.GCSFsConfig{ - Bucket: portableGCSBucket, - Credentials: portableGCSCredentials, - StorageClass: portableGCSStorageClass, - KeyPrefix: portableGCSKeyPrefix, + Bucket: portableGCSBucket, + Credentials: portableGCSCredentials, + 
AutomaticCredentials: portableGCSAutoCredentials, + StorageClass: portableGCSStorageClass, + KeyPrefix: portableGCSKeyPrefix, }, }, }, @@ -147,5 +154,7 @@ func init() { portableCmd.Flags().StringVar(&portableGCSKeyPrefix, "gcs-key-prefix", "", "Allows to restrict access to the virtual folder "+ "identified by this prefix and its contents") portableCmd.Flags().StringVar(&portableGCSCredentialsFile, "gcs-credentials-file", "", "Google Cloud Storage JSON credentials file") + portableCmd.Flags().IntVar(&portableGCSAutoCredentials, "gcs-automatic-credentials", 1, "0 means explicit credentials using a JSON "+ + "credentials file, 1 automatic") rootCmd.AddCommand(portableCmd) } diff --git a/dataprovider/dataprovider.go b/dataprovider/dataprovider.go index 48d6c0d2e..50003f9d8 100644 --- a/dataprovider/dataprovider.go +++ b/dataprovider/dataprovider.go @@ -813,6 +813,9 @@ func addCredentialsToUser(user *User) error { if user.FsConfig.Provider != 2 { return nil } + if user.FsConfig.GCSConfig.AutomaticCredentials > 0 { + return nil + } cred, err := ioutil.ReadFile(user.getGCSCredentialsFilePath()) if err != nil { return err diff --git a/dataprovider/user.go b/dataprovider/user.go index d55277bdf..51acfc251 100644 --- a/dataprovider/user.go +++ b/dataprovider/user.go @@ -419,10 +419,11 @@ func (u *User) getACopy() User { KeyPrefix: u.FsConfig.S3Config.KeyPrefix, }, GCSConfig: vfs.GCSFsConfig{ - Bucket: u.FsConfig.GCSConfig.Bucket, - CredentialFile: u.FsConfig.GCSConfig.CredentialFile, - StorageClass: u.FsConfig.GCSConfig.StorageClass, - KeyPrefix: u.FsConfig.GCSConfig.KeyPrefix, + Bucket: u.FsConfig.GCSConfig.Bucket, + CredentialFile: u.FsConfig.GCSConfig.CredentialFile, + AutomaticCredentials: u.FsConfig.GCSConfig.AutomaticCredentials, + StorageClass: u.FsConfig.GCSConfig.StorageClass, + KeyPrefix: u.FsConfig.GCSConfig.KeyPrefix, }, } diff --git a/httpd/api_utils.go b/httpd/api_utils.go index 98c8f659c..4391e671e 100644 --- a/httpd/api_utils.go +++ b/httpd/api_utils.go @@ 
-462,6 +462,9 @@ func compareUserFsConfig(expected *dataprovider.User, actual *dataprovider.User) expected.FsConfig.GCSConfig.KeyPrefix+"/" != actual.FsConfig.GCSConfig.KeyPrefix { return errors.New("GCS key prefix mismatch") } + if expected.FsConfig.GCSConfig.AutomaticCredentials != actual.FsConfig.GCSConfig.AutomaticCredentials { + return errors.New("GCS automatic credentials mismatch") + } return nil } diff --git a/httpd/httpd_test.go b/httpd/httpd_test.go index 60703ba65..37ea7dd1b 100644 --- a/httpd/httpd_test.go +++ b/httpd/httpd_test.go @@ -352,6 +352,7 @@ func TestAddUserInvalidFsConfig(t *testing.T) { } u.FsConfig.GCSConfig.KeyPrefix = "somedir/subdir/" u.FsConfig.GCSConfig.Credentials = "" + u.FsConfig.GCSConfig.AutomaticCredentials = 0 _, _, err = httpd.AddUser(u, http.StatusBadRequest) if err != nil { t.Errorf("unexpected error adding user with invalid fs config: %v", err) @@ -519,6 +520,14 @@ func TestUserGCSConfig(t *testing.T) { if err != nil { t.Errorf("unable to add user: %v", err) } + os.RemoveAll(credentialsPath) + os.MkdirAll(credentialsPath, 0700) + user.FsConfig.GCSConfig.Credentials = "" + user.FsConfig.GCSConfig.AutomaticCredentials = 1 + user, _, err = httpd.UpdateUser(user, http.StatusOK) + if err != nil { + t.Errorf("unable to update user: %v", err) + } user.FsConfig.Provider = 1 user.FsConfig.S3Config.Bucket = "test1" user.FsConfig.S3Config.Region = "us-east-1" @@ -1937,6 +1946,26 @@ func TestWebUserGCSMock(t *testing.T) { if updateUser.FsConfig.GCSConfig.KeyPrefix != user.FsConfig.GCSConfig.KeyPrefix { t.Error("GCS key prefix mismatch") } + form.Set("gcs_auto_credentials", "on") + b, contentType, _ = getMultipartFormData(form, "", "") + req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b) + req.Header.Set("Content-Type", contentType) + rr = executeRequest(req) + checkResponseCode(t, http.StatusSeeOther, rr.Code) + req, _ = http.NewRequest(http.MethodGet, 
userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil) + rr = executeRequest(req) + checkResponseCode(t, http.StatusOK, rr.Code) + err = render.DecodeJSON(rr.Body, &users) + if err != nil { + t.Errorf("Error decoding users: %v", err) + } + if len(users) != 1 { + t.Errorf("1 user is expected") + } + updateUser = users[0] + if updateUser.FsConfig.GCSConfig.AutomaticCredentials != 1 { + t.Error("GCS automatic credentials mismatch") + } req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil) rr = executeRequest(req) checkResponseCode(t, http.StatusOK, rr.Code) diff --git a/httpd/internal_test.go b/httpd/internal_test.go index 388dbbc77..6ad8aef76 100644 --- a/httpd/internal_test.go +++ b/httpd/internal_test.go @@ -286,8 +286,13 @@ func TestCompareUserFsConfig(t *testing.T) { t.Errorf("S3 key prefix does not match") } expected.FsConfig.S3Config.KeyPrefix = "" +} + +func TestCompareUserGCSConfig(t *testing.T) { + expected := &dataprovider.User{} + actual := &dataprovider.User{} expected.FsConfig.GCSConfig.KeyPrefix = "somedir/subdir" - err = compareUserFsConfig(expected, actual) + err := compareUserFsConfig(expected, actual) if err == nil { t.Errorf("GCS key prefix does not match") } @@ -304,6 +309,12 @@ func TestCompareUserFsConfig(t *testing.T) { t.Errorf("GCS storage class does not match") } expected.FsConfig.GCSConfig.StorageClass = "" + expected.FsConfig.GCSConfig.AutomaticCredentials = 1 + err = compareUserFsConfig(expected, actual) + if err == nil { + t.Errorf("GCS automatic credentials does not match") + } + expected.FsConfig.GCSConfig.AutomaticCredentials = 0 } func TestGCSWebInvalidFormFile(t *testing.T) { diff --git a/httpd/schema/openapi.yaml b/httpd/schema/openapi.yaml index fc096d900..87dee1c03 100644 --- a/httpd/schema/openapi.yaml +++ b/httpd/schema/openapi.yaml @@ -2,7 +2,7 @@ openapi: 3.0.1 info: title: SFTPGo description: 'SFTPGo REST API' - version: 1.7.0 + version: 1.8.0 servers: - url: /api/v1 @@ 
-987,6 +987,16 @@ components: type: string format: byte description: Google Cloud Storage JSON credentials base64 encoded. This field must be populated only when adding/updating an user. It will be always omitted, since there are sensitive data, when you search/get users. The credentials will be stored in the configured "credentials_path" + automatic_credentials: + type: integer + nullable: true + enum: + - 0 + - 1 + description: > + Automatic credentials: + * `0` - disabled, explicit credentials, using a JSON credentials file, must be provided. This is the default value if the field is null + * `1` - enabled, we try to use the Application Default Credentials (ADC) strategy to find your application's credentials storage_class: type: string key_prefix: diff --git a/httpd/web.go b/httpd/web.go index ee7c6f70d..3efbea267 100644 --- a/httpd/web.go +++ b/httpd/web.go @@ -246,6 +246,12 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er fs.GCSConfig.Bucket = r.Form.Get("gcs_bucket") fs.GCSConfig.StorageClass = r.Form.Get("gcs_storage_class") fs.GCSConfig.KeyPrefix = r.Form.Get("gcs_key_prefix") + autoCredentials := r.Form.Get("gcs_auto_credentials") + if len(autoCredentials) > 0 { + fs.GCSConfig.AutomaticCredentials = 1 + } else { + fs.GCSConfig.AutomaticCredentials = 0 + } credentials, _, err := r.FormFile("gcs_credential_file") if err == http.ErrMissingFile { return fs, nil @@ -262,6 +268,7 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er return fs, err } fs.GCSConfig.Credentials = base64.StdEncoding.EncodeToString(fileBytes) + fs.GCSConfig.AutomaticCredentials = 0 } return fs, nil } diff --git a/scripts/sftpgo_api_cli.py b/scripts/sftpgo_api_cli.py index 9fc8cc19f..e988707d0 100755 --- a/scripts/sftpgo_api_cli.py +++ b/scripts/sftpgo_api_cli.py @@ -75,7 +75,8 @@ def buildUserObject(self, user_id=0, username='', password='', public_keys=[], h max_sessions=0, quota_size=0, quota_files=0, permissions={}, 
upload_bandwidth=0, download_bandwidth=0, status=1, expiration_date=0, allowed_ip=[], denied_ip=[], fs_provider='local', s3_bucket='', s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='', - s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file=''): + s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', + gcs_automatic_credentials='automatic'): user = {'id':user_id, 'username':username, 'uid':uid, 'gid':gid, 'max_sessions':max_sessions, 'quota_size':quota_size, 'quota_files':quota_files, 'upload_bandwidth':upload_bandwidth, 'download_bandwidth':download_bandwidth, @@ -95,7 +96,8 @@ def buildUserObject(self, user_id=0, username='', password='', public_keys=[], h user.update({'filters':self.buildFilters(allowed_ip, denied_ip)}) user.update({'filesystem':self.buildFsConfig(fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, - gcs_key_prefix, gcs_storage_class, gcs_credentials_file)}) + gcs_key_prefix, gcs_storage_class, gcs_credentials_file, + gcs_automatic_credentials)}) return user def buildPermissions(self, root_perms, subdirs_perms): @@ -130,7 +132,8 @@ def buildFilters(self, allowed_ip, denied_ip): return filters def buildFsConfig(self, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, - s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, gcs_credentials_file): + s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, + gcs_credentials_file, gcs_automatic_credentials): fs_config = {'provider':0} if fs_provider == 'S3': s3config = {'bucket':s3_bucket, 'region':s3_region, 'access_key':s3_access_key, 'access_secret': @@ -139,9 +142,14 @@ def buildFsConfig(self, fs_provider, s3_bucket, s3_region, s3_access_key, s3_acc fs_config.update({'provider':1, 's3config':s3config}) elif fs_provider == 
'GCS': gcsconfig = {'bucket':gcs_bucket, 'key_prefix':gcs_key_prefix, 'storage_class':gcs_storage_class} + if gcs_automatic_credentials == "automatic": + gcsconfig.update({'automatic_credentials':1}) + else: + gcsconfig.update({'automatic_credentials':0}) if gcs_credentials_file: with open(gcs_credentials_file) as creds: - gcsconfig.update({'credentials':base64.b64encode(creds.read().encode('UTF-8')).decode('UTF-8')}) + gcsconfig.update({'credentials':base64.b64encode(creds.read().encode('UTF-8')).decode('UTF-8'), + 'automatic_credentials':0}) fs_config.update({'provider':2, 'gcsconfig':gcsconfig}) return fs_config @@ -158,12 +166,12 @@ def addUser(self, username='', password='', public_keys='', home_dir='', uid=0, quota_files=0, perms=[], upload_bandwidth=0, download_bandwidth=0, status=1, expiration_date=0, subdirs_permissions=[], allowed_ip=[], denied_ip=[], fs_provider='local', s3_bucket='', s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='', s3_key_prefix='', gcs_bucket='', - gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file=''): + gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', gcs_automatic_credentials='automatic'): u = self.buildUserObject(0, username, password, public_keys, home_dir, uid, gid, max_sessions, quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth, status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, - gcs_credentials_file) + gcs_credentials_file, gcs_automatic_credentials) r = requests.post(self.userPath, json=u, auth=self.auth, verify=self.verify) self.printResponse(r) @@ -171,12 +179,13 @@ def updateUser(self, user_id, username='', password='', public_keys='', home_dir quota_size=0, quota_files=0, perms=[], upload_bandwidth=0, download_bandwidth=0, status=1, 
expiration_date=0, subdirs_permissions=[], allowed_ip=[], denied_ip=[], fs_provider='local', s3_bucket='', s3_region='', s3_access_key='', s3_access_secret='', s3_endpoint='', s3_storage_class='', - s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file=''): + s3_key_prefix='', gcs_bucket='', gcs_key_prefix='', gcs_storage_class='', gcs_credentials_file='', + gcs_automatic_credentials='automatic'): u = self.buildUserObject(user_id, username, password, public_keys, home_dir, uid, gid, max_sessions, quota_size, quota_files, self.buildPermissions(perms, subdirs_permissions), upload_bandwidth, download_bandwidth, status, expiration_date, allowed_ip, denied_ip, fs_provider, s3_bucket, s3_region, s3_access_key, s3_access_secret, s3_endpoint, s3_storage_class, s3_key_prefix, gcs_bucket, gcs_key_prefix, gcs_storage_class, - gcs_credentials_file) + gcs_credentials_file, gcs_automatic_credentials) r = requests.put(urlparse.urljoin(self.userPath, 'user/' + str(user_id)), json=u, auth=self.auth, verify=self.verify) self.printResponse(r) @@ -448,6 +457,8 @@ def addCommonUserArguments(parser): ' Default: %(default)s') parser.add_argument('--gcs-storage-class', type=str, default='', help='Default: %(default)s') parser.add_argument('--gcs-credentials-file', type=str, default='', help='Default: %(default)s') + parser.add_argument('--gcs-automatic-credentials', type=str, default='automatic', choices=['explicit', 'automatic'], + help='If you provide a credentials file this argument will be set to "explicit". 
Default: %(default)s') if __name__ == '__main__': @@ -558,7 +569,7 @@ def addCommonUserArguments(parser): args.status, getDatetimeAsMillisSinceEpoch(args.expiration_date), args.subdirs_permissions, args.allowed_ip, args.denied_ip, args.fs, args.s3_bucket, args.s3_region, args.s3_access_key, args.s3_access_secret, args.s3_endpoint, args.s3_storage_class, args.s3_key_prefix, args.gcs_bucket, args.gcs_key_prefix, - args.gcs_storage_class, args.gcs_credentials_file) + args.gcs_storage_class, args.gcs_credentials_file, args.gcs_automatic_credentials) elif args.command == 'update-user': api.updateUser(args.id, args.username, args.password, args.public_keys, args.home_dir, args.uid, args.gid, args.max_sessions, args.quota_size, args.quota_files, args.permissions, args.upload_bandwidth, @@ -566,7 +577,7 @@ def addCommonUserArguments(parser): args.subdirs_permissions, args.allowed_ip, args.denied_ip, args.fs, args.s3_bucket, args.s3_region, args.s3_access_key, args.s3_access_secret, args.s3_endpoint, args.s3_storage_class, args.s3_key_prefix, args.gcs_bucket, args.gcs_key_prefix, args.gcs_storage_class, - args.gcs_credentials_file) + args.gcs_credentials_file, args.gcs_automatic_credentials) elif args.command == 'delete-user': api.deleteUser(args.id) elif args.command == 'get-users': diff --git a/service/service.go b/service/service.go index 3e2ac440c..9cf9fdf39 100644 --- a/service/service.go +++ b/service/service.go @@ -172,52 +172,60 @@ func (s *Service) StartPortableMode(sftpdPort int, enabledSSHCommands []string, config.SetSFTPDConfig(sftpdConf) err = s.Start() - if err == nil { - var mDNSService *zeroconf.Server - var err error - if advertiseService { - version := utils.GetAppVersion() - meta := []string{ - fmt.Sprintf("version=%v", version.GetVersionAsString()), - } - if advertiseCredentials { - logger.InfoToConsole("Advertising credentials via multicast DNS") - meta = append(meta, fmt.Sprintf("user=%v", s.PortableUser.Username)) - if len(s.PortableUser.Password) > 0 
{ - meta = append(meta, fmt.Sprintf("password=%v", s.PortableUser.Password)) - } else { - logger.InfoToConsole("Unable to advertise key based credentials via multicast DNS, we don't have the private key") - } - } - mDNSService, err = zeroconf.Register( - fmt.Sprintf("SFTPGo portable %v", sftpdConf.BindPort), // service instance name - "_sftp-ssh._tcp", // service type and protocol - "local.", // service domain - sftpdConf.BindPort, // service port - meta, // service metadata - nil, // register on all network interfaces - ) - if err != nil { - mDNSService = nil - logger.WarnToConsole("Unable to advertise SFTP service via multicast DNS: %v", err) + if err != nil { + return err + } + var mDNSService *zeroconf.Server + if advertiseService { + version := utils.GetAppVersion() + meta := []string{ + fmt.Sprintf("version=%v", version.GetVersionAsString()), + } + if advertiseCredentials { + logger.InfoToConsole("Advertising credentials via multicast DNS") + meta = append(meta, fmt.Sprintf("user=%v", s.PortableUser.Username)) + if len(s.PortableUser.Password) > 0 { + meta = append(meta, fmt.Sprintf("password=%v", s.PortableUser.Password)) } else { - logger.InfoToConsole("SFTP service advertised via multicast DNS") + logger.InfoToConsole("Unable to advertise key based credentials via multicast DNS, we don't have the private key") } + } + mDNSService, err = zeroconf.Register( + fmt.Sprintf("SFTPGo portable %v", sftpdConf.BindPort), // service instance name + "_sftp-ssh._tcp", // service type and protocol + "local.", // service domain + sftpdConf.BindPort, // service port + meta, // service metadata + nil, // register on all network interfaces + ) + if err != nil { + mDNSService = nil + logger.WarnToConsole("Unable to advertise SFTP service via multicast DNS: %v", err) + } else { + logger.InfoToConsole("SFTP service advertised via multicast DNS") + } + } + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGTERM) + go func() { + <-sig + if mDNSService 
!= nil { + logger.InfoToConsole("unregistering multicast DNS service") + mDNSService.Shutdown() } - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt, syscall.SIGTERM) - go func() { - <-sig - if mDNSService != nil { - logger.InfoToConsole("unregistering multicast DNS service") - mDNSService.Shutdown() - } - s.Stop() - }() - logger.InfoToConsole("Portable mode ready, SFTP port: %v, user: %#v, password: %#v, public keys: %v, directory: %#v, "+ - "permissions: %v, enabled ssh commands: %v", sftpdConf.BindPort, s.PortableUser.Username, s.PortableUser.Password, - s.PortableUser.PublicKeys, s.PortableUser.HomeDir, s.PortableUser.Permissions, sftpdConf.EnabledSSHCommands) + s.Stop() + }() + var dirToServe string + if s.PortableUser.FsConfig.Provider == 1 { + dirToServe = s.PortableUser.FsConfig.S3Config.KeyPrefix + } else if s.PortableUser.FsConfig.Provider == 2 { + dirToServe = s.PortableUser.FsConfig.GCSConfig.KeyPrefix + } else { + dirToServe = s.PortableUser.HomeDir } - return err + logger.InfoToConsole("Portable mode ready, SFTP port: %v, user: %#v, password: %#v, public keys: %v, directory: %#v, "+ + "permissions: %v, enabled ssh commands: %v", sftpdConf.BindPort, s.PortableUser.Username, s.PortableUser.Password, + s.PortableUser.PublicKeys, dirToServe, s.PortableUser.Permissions, sftpdConf.EnabledSSHCommands) + return nil } diff --git a/templates/user.html b/templates/user.html index 74b94f6ed..9315c6f1d 100644 --- a/templates/user.html +++ b/templates/user.html @@ -203,13 +203,13 @@