Fix SFTP performance and add SeaweedFS all-in-one deployment (#6792)
* improve perfs & fix rclone & refactoring
* improve perfs on download + add seaweedfs all-in-one deployment
* use helper for topologySpreadConstraints and fix create home dir of sftp users
* fix helm lint
* add missing ctx param

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>
31 changed files with 1171 additions and 805 deletions
  24  k8s/charts/seaweedfs/templates/_helpers.tpl
 427  k8s/charts/seaweedfs/templates/all-in-one-deployment.yaml
  21  k8s/charts/seaweedfs/templates/all-in-one-pvc.yaml
  83  k8s/charts/seaweedfs/templates/all-in-one-service.yml
  29  k8s/charts/seaweedfs/templates/all-in-one-servicemonitor.yaml
   9  k8s/charts/seaweedfs/templates/cosi-deployment.yaml
   4  k8s/charts/seaweedfs/templates/filer-statefulset.yaml
   2  k8s/charts/seaweedfs/templates/master-configmap.yaml
   6  k8s/charts/seaweedfs/templates/master-statefulset.yaml
   8  k8s/charts/seaweedfs/templates/s3-deployment.yaml
   2  k8s/charts/seaweedfs/templates/s3-secret.yaml
  27  k8s/charts/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml
   8  k8s/charts/seaweedfs/templates/sftp-deployment.yaml
   2  k8s/charts/seaweedfs/templates/sftp-secret.yaml
   7  k8s/charts/seaweedfs/templates/sftp-service.yaml
   6  k8s/charts/seaweedfs/templates/volume-statefulset.yaml
 148  k8s/charts/seaweedfs/values.yaml
   9  weed/sftpd/auth/auth.go
  11  weed/sftpd/auth/password.go
  19  weed/sftpd/auth/publickey.go
  99  weed/sftpd/sftp_file_reader.go
  44  weed/sftpd/sftp_file_writer.go
 261  weed/sftpd/sftp_filer.go
  89  weed/sftpd/sftp_permissions.go
  63  weed/sftpd/sftp_server.go
 100  weed/sftpd/sftp_service.go
 143  weed/sftpd/sftp_userstore.go
  35  weed/sftpd/user/filestore.go
 204  weed/sftpd/user/homemanager.go
  34  weed/sftpd/user/user.go
  52  weed/sftpd/utils/lru_cache.go
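
The chart side of the change is driven by the new allInOne.* values consumed by the templates below. As an illustration only, a values override that enables the single-pod deployment together with its S3 and SFTP gateways might look like the following sketch; the key names are taken from the templates in this PR, but the concrete values (claim name, size, metrics port) are assumptions, and the real defaults live in k8s/charts/seaweedfs/values.yaml.

allInOne:
  enabled: true                  # render the all-in-one Deployment and Service
  s3:
    enabled: true                # serve S3 from the same pod
  sftp:
    enabled: true                # serve SFTP from the same pod
  metricsPort: 9327              # illustrative; exposes the single server-metrics endpoint
  data:
    type: persistentVolumeClaim  # hostPath | persistentVolumeClaim | emptyDir
    claimName: seaweedfs-all-in-one-data  # illustrative claim name
    size: 10Gi                   # illustrative size
    storageClass: ""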
k8s/charts/seaweedfs/templates/all-in-one-deployment.yaml
@@ -0,0 +1,427 @@
{{- if .Values.allInOne.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "seaweedfs.name" . }}-all-in-one
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/component: seaweedfs-all-in-one
  {{- if .Values.allInOne.annotations }}
  annotations:
    {{- toYaml .Values.allInOne.annotations | nindent 4 }}
  {{- end }}
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
      app.kubernetes.io/component: seaweedfs-all-in-one
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
        helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
        app.kubernetes.io/instance: {{ .Release.Name }}
        app.kubernetes.io/component: seaweedfs-all-in-one
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- with .Values.allInOne.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      annotations:
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- with .Values.allInOne.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      restartPolicy: {{ default .Values.global.restartPolicy .Values.allInOne.restartPolicy }}
      {{- if .Values.allInOne.affinity }}
      affinity:
        {{ tpl .Values.allInOne.affinity . | nindent 8 | trim }}
      {{- end }}
      {{- if .Values.allInOne.topologySpreadConstraints }}
      {{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "all-in-one") | nindent 6 }}
      {{- end }}
      {{- if .Values.allInOne.tolerations }}
      tolerations:
        {{- tpl .Values.allInOne.tolerations . | nindent 8 }}
      {{- end }}
      {{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
      terminationGracePeriodSeconds: 60
      enableServiceLinks: false
      {{- if .Values.allInOne.priorityClassName }}
      priorityClassName: {{ .Values.allInOne.priorityClassName | quote }}
      {{- end }}
      {{- if .Values.allInOne.serviceAccountName }}
      serviceAccountName: {{ .Values.allInOne.serviceAccountName | quote }}
      {{- end }}
      {{- if .Values.allInOne.initContainers }}
      initContainers:
        {{- tpl .Values.allInOne.initContainers . | nindent 8 }}
      {{- end }}
      {{- if .Values.allInOne.podSecurityContext.enabled }}
      securityContext:
        {{- omit .Values.allInOne.podSecurityContext "enabled" | toYaml | nindent 8 }}
      {{- end }}
      containers:
        - name: seaweedfs
          image: {{ template "master.image" . }}
          imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: SEAWEEDFS_FULLNAME
              value: "{{ template "seaweedfs.name" . }}"
            {{- if .Values.allInOne.extraEnvironmentVars }}
            {{- range $key, $value := .Values.allInOne.extraEnvironmentVars }}
            - name: {{ $key }}
              {{- if kindIs "string" $value }}
              value: {{ $value | quote }}
              {{- else }}
              valueFrom:
                {{ toYaml $value | nindent 16 }}
              {{- end }}
            {{- end }}
            {{- end }}
            {{- if .Values.global.extraEnvironmentVars }}
            {{- range $key, $value := .Values.global.extraEnvironmentVars }}
            - name: {{ $key }}
              {{- if kindIs "string" $value }}
              value: {{ $value | quote }}
              {{- else }}
              valueFrom:
                {{ toYaml $value | nindent 16 }}
              {{- end }}
            {{- end }}
            {{- end }}
          command:
            - "/bin/sh"
            - "-ec"
            - |
              /usr/bin/weed \
              -v={{ .Values.global.loggingLevel }} \
              server \
              -dir=/data \
              -master \
              -volume \
              -ip=${POD_IP} \
              -ip.bind=0.0.0.0 \
              {{- if .Values.allInOne.idleTimeout }}
              -idleTimeout={{ .Values.allInOne.idleTimeout }} \
              {{- end }}
              {{- if .Values.allInOne.dataCenter }}
              -dataCenter={{ .Values.allInOne.dataCenter }} \
              {{- end }}
              {{- if .Values.allInOne.rack }}
              -rack={{ .Values.allInOne.rack }} \
              {{- end }}
              {{- if .Values.allInOne.whiteList }}
              -whiteList={{ .Values.allInOne.whiteList }} \
              {{- end }}
              {{- if .Values.allInOne.disableHttp }}
              -disableHttp={{ .Values.allInOne.disableHttp }} \
              {{- end }}
              -master.port={{ .Values.master.port }} \
              {{- if .Values.global.enableReplication }}
              -master.defaultReplication={{ .Values.global.replicationPlacement }} \
              {{- else }}
              -master.defaultReplication={{ .Values.master.defaultReplication }} \
              {{- end }}
              {{- if .Values.master.volumePreallocate }}
              -master.volumePreallocate \
              {{- end }}
              -master.volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
              {{- if .Values.master.garbageThreshold }}
              -master.garbageThreshold={{ .Values.master.garbageThreshold }} \
              {{- end }}
              -volume.port={{ .Values.volume.port }} \
              -volume.readMode={{ .Values.volume.readMode }} \
              {{- if .Values.volume.imagesFixOrientation }}
              -volume.images.fix.orientation \
              {{- end }}
              {{- if .Values.volume.index }}
              -volume.index={{ .Values.volume.index }} \
              {{- end }}
              {{- if .Values.volume.fileSizeLimitMB }}
              -volume.fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \
              {{- end }}
              -volume.minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
              -volume.compactionMBps={{ .Values.volume.compactionMBps }} \
              {{- if .Values.allInOne.metricsPort }}
              -metricsPort={{ .Values.allInOne.metricsPort }} \
              {{- else if .Values.master.metricsPort }}
              -metricsPort={{ .Values.master.metricsPort }} \
              {{- end }}
              -filer \
              -filer.port={{ .Values.filer.port }} \
              {{- if .Values.filer.disableDirListing }}
              -filer.disableDirListing \
              {{- end }}
              -filer.dirListLimit={{ .Values.filer.dirListLimit }} \
              {{- if .Values.global.enableReplication }}
              -filer.defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \
              {{- else }}
              -filer.defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \
              {{- end }}
              {{- if .Values.filer.maxMB }}
              -filer.maxMB={{ .Values.filer.maxMB }} \
              {{- end }}
              {{- if .Values.filer.encryptVolumeData }}
              -filer.encryptVolumeData \
              {{- end }}
              {{- if .Values.filer.filerGroup}}
              -filer.filerGroup={{ .Values.filer.filerGroup}} \
              {{- end }}
              {{- if .Values.filer.rack }}
              -filer.rack={{ .Values.filer.rack }} \
              {{- end }}
              {{- if .Values.filer.dataCenter }}
              -filer.dataCenter={{ .Values.filer.dataCenter }} \
              {{- end }}
              {{- if .Values.allInOne.s3.enabled }}
              -s3 \
              -s3.port={{ .Values.s3.port }} \
              {{- if .Values.s3.domainName }}
              -s3.domainName={{ .Values.s3.domainName }} \
              {{- end }}
              {{- if .Values.global.enableSecurity }}
              {{- if .Values.s3.httpsPort }}
              -s3.port.https={{ .Values.s3.httpsPort }} \
              {{- end }}
              -s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
              -s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
              {{- end }}
              {{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }}
              -s3.allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
              {{- end }}
              {{- if .Values.s3.enableAuth }}
              -s3.config=/etc/sw/s3/seaweedfs_s3_config \
              {{- end }}
              {{- if .Values.s3.auditLogConfig }}
              -s3.auditLogConfig=/etc/sw/s3/s3_auditLogConfig.json \
              {{- end }}
              {{- end }}
              {{- if .Values.allInOne.sftp.enabled }}
              -sftp \
              -sftp.port={{ .Values.sftp.port }} \
              {{- if .Values.sftp.sshPrivateKey }}
              -sftp.sshPrivateKey={{ .Values.sftp.sshPrivateKey }} \
              {{- end }}
              {{- if .Values.sftp.hostKeysFolder }}
              -sftp.hostKeysFolder={{ .Values.sftp.hostKeysFolder }} \
              {{- end }}
              {{- if .Values.sftp.authMethods }}
              -sftp.authMethods={{ .Values.sftp.authMethods }} \
              {{- end }}
              {{- if .Values.sftp.maxAuthTries }}
              -sftp.maxAuthTries={{ .Values.sftp.maxAuthTries }} \
              {{- end }}
              {{- if .Values.sftp.bannerMessage }}
              -sftp.bannerMessage="{{ .Values.sftp.bannerMessage }}" \
              {{- end }}
              {{- if .Values.sftp.loginGraceTime }}
              -sftp.loginGraceTime={{ .Values.sftp.loginGraceTime }} \
              {{- end }}
              {{- if .Values.sftp.clientAliveInterval }}
              -sftp.clientAliveInterval={{ .Values.sftp.clientAliveInterval }} \
              {{- end }}
              {{- if .Values.sftp.clientAliveCountMax }}
              -sftp.clientAliveCountMax={{ .Values.sftp.clientAliveCountMax }} \
              {{- end }}
              -sftp.userStoreFile=/etc/sw/sftp/seaweedfs_sftp_config \
              {{- end }}

          volumeMounts:
            - name: data
              mountPath: /data
            {{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
            - name: config-s3-users
              mountPath: /etc/sw/s3
              readOnly: true
            {{- end }}
            {{- if .Values.allInOne.sftp.enabled }}
            - name: config-ssh
              mountPath: /etc/sw/ssh
              readOnly: true
            - mountPath: /etc/sw/sftp
              name: config-users
              readOnly: true
            {{- end }}
            {{- if .Values.filer.notificationConfig }}
            - name: notification-config
              mountPath: /etc/seaweedfs/notification.toml
              subPath: notification.toml
              readOnly: true
            {{- end }}
            - name: master-config
              mountPath: /etc/seaweedfs/master.toml
              subPath: master.toml
              readOnly: true
            {{- if .Values.global.enableSecurity }}
            - name: security-config
              mountPath: /etc/seaweedfs/security.toml
              subPath: security.toml
              readOnly: true
            - name: ca-cert
              mountPath: /usr/local/share/ca-certificates/ca/
              readOnly: true
            - name: master-cert
              mountPath: /usr/local/share/ca-certificates/master/
              readOnly: true
            - name: volume-cert
              mountPath: /usr/local/share/ca-certificates/volume/
              readOnly: true
            - name: filer-cert
              mountPath: /usr/local/share/ca-certificates/filer/
              readOnly: true
            - name: client-cert
              mountPath: /usr/local/share/ca-certificates/client/
              readOnly: true
            {{- end }}
            {{ tpl .Values.allInOne.extraVolumeMounts . | nindent 12 }}
          ports:
            - containerPort: {{ .Values.master.port }}
              name: swfs-mas
            - containerPort: {{ .Values.master.grpcPort }}
              name: swfs-mas-grpc
            - containerPort: {{ .Values.volume.port }}
              name: swfs-vol
            - containerPort: {{ .Values.volume.grpcPort }}
              name: swfs-vol-grpc
            - containerPort: {{ .Values.filer.port }}
              name: swfs-fil
            - containerPort: {{ .Values.filer.grpcPort }}
              name: swfs-fil-grpc
            {{- if .Values.allInOne.s3.enabled }}
            - containerPort: {{ .Values.s3.port }}
              name: swfs-s3
            {{- if .Values.s3.httpsPort }}
            - containerPort: {{ .Values.s3.httpsPort }}
              name: swfs-s3-tls
            {{- end }}
            {{- end }}
            {{- if .Values.allInOne.sftp.enabled }}
            - containerPort: {{ .Values.sftp.port }}
              name: swfs-sftp
            {{- end }}
            {{- if .Values.allInOne.metricsPort }}
            - containerPort: {{ .Values.allInOne.metricsPort }}
              name: server-metrics
            {{- end }}
          {{- if .Values.allInOne.readinessProbe.enabled }}
          readinessProbe:
            httpGet:
              path: {{ .Values.allInOne.readinessProbe.httpGet.path }}
              port: {{ .Values.master.port }}
              scheme: {{ .Values.allInOne.readinessProbe.scheme }}
            initialDelaySeconds: {{ .Values.allInOne.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.allInOne.readinessProbe.periodSeconds }}
            successThreshold: {{ .Values.allInOne.readinessProbe.successThreshold }}
            failureThreshold: {{ .Values.allInOne.readinessProbe.failureThreshold }}
            timeoutSeconds: {{ .Values.allInOne.readinessProbe.timeoutSeconds }}
          {{- end }}
          {{- if .Values.allInOne.livenessProbe.enabled }}
          livenessProbe:
            httpGet:
              path: {{ .Values.allInOne.livenessProbe.httpGet.path }}
              port: {{ .Values.master.port }}
              scheme: {{ .Values.allInOne.livenessProbe.scheme }}
            initialDelaySeconds: {{ .Values.allInOne.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.allInOne.livenessProbe.periodSeconds }}
            successThreshold: {{ .Values.allInOne.livenessProbe.successThreshold }}
            failureThreshold: {{ .Values.allInOne.livenessProbe.failureThreshold }}
            timeoutSeconds: {{ .Values.allInOne.livenessProbe.timeoutSeconds }}
          {{- end }}
          {{- with .Values.allInOne.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.allInOne.containerSecurityContext.enabled }}
          securityContext:
            {{- omit .Values.allInOne.containerSecurityContext "enabled" | toYaml | nindent 12 }}
          {{- end }}
        {{- if .Values.allInOne.sidecars }}
        {{- include "common.tplvalues.render" (dict "value" .Values.allInOne.sidecars "context" $) | nindent 8 }}
        {{- end }}
      volumes:
        - name: data
          {{- if eq .Values.allInOne.data.type "hostPath" }}
          hostPath:
            path: {{ .Values.allInOne.data.hostPathPrefix }}/seaweedfs-all-in-one-data/
            type: DirectoryOrCreate
          {{- else if eq .Values.allInOne.data.type "persistentVolumeClaim" }}
          persistentVolumeClaim:
            claimName: {{ .Values.allInOne.data.claimName }}
          {{- else if eq .Values.allInOne.data.type "emptyDir" }}
          emptyDir: {}
          {{- end }}
        {{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
        - name: config-s3-users
          secret:
            defaultMode: 420
            secretName: {{ default (printf "%s-s3-secret" (include "seaweedfs.name" .)) (or .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret) }}
        {{- end }}
        {{- if .Values.allInOne.sftp.enabled }}
        - name: config-ssh
          secret:
            defaultMode: 420
            secretName: {{ default (printf "%s-sftp-ssh-secret" (include "seaweedfs.name" .)) .Values.sftp.existingSshConfigSecret }}
        - name: config-users
          secret:
            defaultMode: 420
            secretName: {{ default (printf "%s-sftp-secret" (include "seaweedfs.name" .)) .Values.sftp.existingConfigSecret }}
        {{- end }}
        {{- if .Values.filer.notificationConfig }}
        - name: notification-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-notification-config
        {{- end }}
        - name: master-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-master-config
        {{- if .Values.global.enableSecurity }}
        - name: security-config
          configMap:
            name: {{ template "seaweedfs.name" . }}-security-config
        - name: ca-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-ca-cert
        - name: master-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-master-cert
        - name: volume-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-volume-cert
        - name: filer-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-filer-cert
        - name: client-cert
          secret:
            secretName: {{ template "seaweedfs.name" . }}-client-cert
        {{- end }}
        {{ tpl .Values.allInOne.extraVolumes . | nindent 8 }}
      {{- if .Values.allInOne.nodeSelector }}
      nodeSelector:
        {{ tpl .Values.allInOne.nodeSelector . | nindent 8 }}
      {{- end }}
{{- end }}
k8s/charts/seaweedfs/templates/all-in-one-pvc.yaml
@@ -0,0 +1,21 @@
{{- if and .Values.allInOne.enabled (eq .Values.allInOne.data.type "persistentVolumeClaim") }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Values.allInOne.data.claimName }}
  labels:
    app.kubernetes.io/component: seaweedfs-all-in-one
  {{- if .Values.allInOne.annotations }}
  annotations:
    {{- toYaml .Values.allInOne.annotations | nindent 4 }}
  {{- end }}
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: {{ .Values.allInOne.data.size }}
  {{- if .Values.allInOne.data.storageClass }}
  storageClassName: {{ .Values.allInOne.data.storageClass }}
  {{- end }}
{{- end }}
k8s/charts/seaweedfs/templates/all-in-one-service.yml
@@ -0,0 +1,83 @@
{{- if .Values.allInOne.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "seaweedfs.name" . }}-all-in-one
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/component: seaweedfs-all-in-one
  {{- if .Values.allInOne.service.annotations }}
  annotations:
    {{- toYaml .Values.allInOne.service.annotations | nindent 4 }}
  {{- end }}
spec:
  internalTrafficPolicy: {{ .Values.allInOne.service.internalTrafficPolicy | default "Cluster" }}
  ports:
    # Master ports
    - name: "swfs-master"
      port: {{ .Values.master.port }}
      targetPort: {{ .Values.master.port }}
      protocol: TCP
    - name: "swfs-master-grpc"
      port: {{ .Values.master.grpcPort }}
      targetPort: {{ .Values.master.grpcPort }}
      protocol: TCP

    # Volume ports
    - name: "swfs-volume"
      port: {{ .Values.volume.port }}
      targetPort: {{ .Values.volume.port }}
      protocol: TCP
    - name: "swfs-volume-grpc"
      port: {{ .Values.volume.grpcPort }}
      targetPort: {{ .Values.volume.grpcPort }}
      protocol: TCP

    # Filer ports
    - name: "swfs-filer"
      port: {{ .Values.filer.port }}
      targetPort: {{ .Values.filer.port }}
      protocol: TCP
    - name: "swfs-filer-grpc"
      port: {{ .Values.filer.grpcPort }}
      targetPort: {{ .Values.filer.grpcPort }}
      protocol: TCP

    # S3 ports (if enabled)
    {{- if .Values.allInOne.s3.enabled }}
    - name: "swfs-s3"
      port: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
      targetPort: {{ if .Values.allInOne.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }}
      protocol: TCP
    {{- if and .Values.allInOne.s3.enabled .Values.s3.httpsPort }}
    - name: "swfs-s3-tls"
      port: {{ .Values.s3.httpsPort }}
      targetPort: {{ .Values.s3.httpsPort }}
      protocol: TCP
    {{- end }}
    {{- end }}

    # SFTP ports (if enabled)
    {{- if .Values.allInOne.sftp.enabled }}
    - name: "swfs-sftp"
      port: {{ .Values.sftp.port }}
      targetPort: {{ .Values.sftp.port }}
      protocol: TCP
    {{- end }}

    # Server metrics port (single metrics endpoint for all services)
    {{- if .Values.allInOne.metricsPort }}
    - name: "server-metrics"
      port: {{ .Values.allInOne.metricsPort }}
      targetPort: {{ .Values.allInOne.metricsPort }}
      protocol: TCP
    {{- end }}

  selector:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}
k8s/charts/seaweedfs/templates/all-in-one-servicemonitor.yaml
@@ -0,0 +1,29 @@
{{- if .Values.allInOne.enabled }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "seaweedfs.name" . }}-all-in-one
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/component: all-in-one
    {{- with .Values.global.monitoring.additionalLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  endpoints:
    {{- if .Values.allInOne.metricsPort }}
    - interval: 30s
      port: server-metrics
      scrapeTimeout: 5s
    {{- end }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
      app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}
{{- end }}
k8s/charts/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml
@@ -1,20 +1,19 @@
 {{- if .Values.global.monitoring.enabled }}
 {{- $files := .Files.Glob "dashboards/*.json" }}
 {{- if $files }}
-apiVersion: v1
-kind: ConfigMapList
-items:
-{{- range $path, $fileContents := $files }}
+{{- range $path, $file := $files }}
 {{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }}
-- apiVersion: v1
-  kind: ConfigMap
-  metadata:
-    name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }}
-    namespace: {{ $.Release.Namespace }}
-    labels:
-      grafana_dashboard: "1"
-  data:
-    {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }}
+  namespace: {{ $.Release.Namespace }}
+  labels:
+    grafana_dashboard: "1"
+data:
+  {{ $dashboardName }}.json: |-
+{{ toString $file | indent 4 }}
+{{- end }}
 {{- end }}
 {{- end }}
-{{- end }}
weed/sftpd/sftp_file_reader.go
@@ -0,0 +1,99 @@
package sftpd

import (
    "fmt"
    "io"
    "sync"

    "github.com/seaweedfs/seaweedfs/weed/filer"
    filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/sftpd/utils"
)

type SeaweedFileReaderAt struct {
    fs         *SftpServer
    entry      *filer_pb.Entry
    reader     io.ReadSeeker
    mu         sync.Mutex
    bufferSize int
    cache      *utils.LruCache
    fileSize   int64
}

func NewSeaweedFileReaderAt(fs *SftpServer, entry *filer_pb.Entry) *SeaweedFileReaderAt {
    return &SeaweedFileReaderAt{
        fs:         fs,
        entry:      entry,
        bufferSize: 5 * 1024 * 1024,      // 5MB
        cache:      utils.NewLRUCache(10), // Max 10 chunks = ~50MB
        fileSize:   int64(entry.Attributes.FileSize),
    }
}

func (ra *SeaweedFileReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
    ra.mu.Lock()
    defer ra.mu.Unlock()

    if off >= ra.fileSize {
        return 0, io.EOF
    }

    remaining := len(p)
    readOffset := off
    totalRead := 0

    for remaining > 0 && readOffset < ra.fileSize {
        bufferKey := (readOffset / int64(ra.bufferSize)) * int64(ra.bufferSize)
        bufferOffset := int(readOffset - bufferKey)

        buffer, ok := ra.cache.Get(bufferKey)
        if !ok {
            readSize := ra.bufferSize
            if bufferKey+int64(readSize) > ra.fileSize {
                readSize = int(ra.fileSize - bufferKey)
            }

            if ra.reader == nil {
                r := filer.NewFileReader(ra.fs, ra.entry)
                if rs, ok := r.(io.ReadSeeker); ok {
                    ra.reader = rs
                } else {
                    return 0, fmt.Errorf("reader is not seekable")
                }
            }

            if _, err := ra.reader.Seek(bufferKey, io.SeekStart); err != nil {
                return 0, fmt.Errorf("seek error: %v", err)
            }

            buffer = make([]byte, readSize)
            readBytes, err := io.ReadFull(ra.reader, buffer)
            if err != nil && err != io.ErrUnexpectedEOF {
                return 0, fmt.Errorf("read error: %v", err)
            }
            buffer = buffer[:readBytes]
            ra.cache.Put(bufferKey, buffer)
        }

        toCopy := len(buffer) - bufferOffset
        if toCopy > remaining {
            toCopy = remaining
        }
        if toCopy <= 0 {
            break
        }

        copy(p[totalRead:], buffer[bufferOffset:bufferOffset+toCopy])
        totalRead += toCopy
        readOffset += int64(toCopy)
        remaining -= toCopy
    }

    if totalRead == 0 {
        return 0, io.EOF
    }
    if totalRead < len(p) {
        return totalRead, io.EOF
    }
    return totalRead, nil
}
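
This new reader serves SFTP downloads in 5 MB chunks keyed by their chunk-aligned offset, with a small LRU of recently used chunks, so the many small ReadAt calls an SFTP client issues do not each go back to the filer. A minimal, self-contained sketch of the offset arithmetic used above (the chunkFor helper is invented here for illustration; in ReadAt the same two lines compute bufferKey and bufferOffset):

package main

import "fmt"

// chunkFor mirrors the arithmetic in ReadAt: align the request offset down to a
// chunk boundary (the cache key) and keep the remainder as the offset inside it.
func chunkFor(off, chunkSize int64) (key int64, inChunk int64) {
    key = (off / chunkSize) * chunkSize
    return key, off - key
}

func main() {
    const chunkSize = 5 * 1024 * 1024 // same 5 MB chunk size as SeaweedFileReaderAt
    for _, off := range []int64{0, 4 << 20, 12 << 20} {
        key, in := chunkFor(off, chunkSize)
        fmt.Printf("off=%d -> cache key %d, offset in chunk %d\n", off, key, in)
    }
}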
weed/sftpd/sftp_userstore.go
@@ -1,143 +0,0 @@
package sftpd

import (
    "crypto/subtle"
    "encoding/json"
    "fmt"
    "os"
    "strings"
    "sync"
)

// UserStore interface for user management.
type UserStore interface {
    GetUser(username string) (*User, error)
    ValidatePassword(username string, password []byte) bool
    ValidatePublicKey(username string, keyData string) bool
    GetUserPermissions(username string, path string) []string
}

// User represents an SFTP user with authentication and permission details.
type User struct {
    Username    string
    Password    string              // Plaintext password
    PublicKeys  []string            // Authorized public keys
    HomeDir     string              // User's home directory
    Permissions map[string][]string // path -> permissions (read, write, list, etc.)
    Uid         uint32              // User ID for file ownership
    Gid         uint32              // Group ID for file ownership
}

// FileUserStore implements UserStore using a JSON file.
type FileUserStore struct {
    filePath string
    users    map[string]*User
    mu       sync.RWMutex
}

// NewFileUserStore creates a new user store from a JSON file.
func NewFileUserStore(filePath string) (*FileUserStore, error) {
    store := &FileUserStore{
        filePath: filePath,
        users:    make(map[string]*User),
    }

    if err := store.loadUsers(); err != nil {
        return nil, err
    }

    return store, nil
}

// loadUsers loads users from the JSON file.
func (s *FileUserStore) loadUsers() error {
    s.mu.Lock()
    defer s.mu.Unlock()

    // Check if file exists
    if _, err := os.Stat(s.filePath); os.IsNotExist(err) {
        return fmt.Errorf("user store file not found: %s", s.filePath)
    }

    data, err := os.ReadFile(s.filePath)
    if err != nil {
        return fmt.Errorf("failed to read user store file: %v", err)
    }

    var users []*User
    if err := json.Unmarshal(data, &users); err != nil {
        return fmt.Errorf("failed to parse user store file: %v", err)
    }

    for _, user := range users {
        s.users[user.Username] = user
    }

    return nil
}

// GetUser returns a user by username.
func (s *FileUserStore) GetUser(username string) (*User, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    user, ok := s.users[username]
    if !ok {
        return nil, fmt.Errorf("user not found: %s", username)
    }

    return user, nil
}

// ValidatePassword checks if the password is valid for the user.
func (s *FileUserStore) ValidatePassword(username string, password []byte) bool {
    user, err := s.GetUser(username)
    if err != nil {
        return false
    }

    // Compare plaintext password using constant time comparison for security
    return subtle.ConstantTimeCompare([]byte(user.Password), password) == 1
}

// ValidatePublicKey checks if the public key is valid for the user.
func (s *FileUserStore) ValidatePublicKey(username string, keyData string) bool {
    user, err := s.GetUser(username)
    if err != nil {
        return false
    }

    for _, key := range user.PublicKeys {
        if subtle.ConstantTimeCompare([]byte(key), []byte(keyData)) == 1 {
            return true
        }
    }

    return false
}

// GetUserPermissions returns the permissions for a user on a path.
func (s *FileUserStore) GetUserPermissions(username string, path string) []string {
    user, err := s.GetUser(username)
    if err != nil {
        return nil
    }

    // Check exact path match first
    if perms, ok := user.Permissions[path]; ok {
        return perms
    }

    // Check parent directories
    var bestMatch string
    var bestPerms []string

    for p, perms := range user.Permissions {
        if strings.HasPrefix(path, p) && len(p) > len(bestMatch) {
            bestMatch = p
            bestPerms = perms
        }
    }

    return bestPerms
}
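
This file is removed by the PR; its responsibilities move into the new weed/sftpd/user package (user.go, filestore.go, homemanager.go), which is not shown in full here. For context, loadUsers above expects the file passed via -sftp.userStoreFile to contain a JSON array of User records. A hypothetical sketch that prints one such record, reusing the struct exactly as it appeared in this removed file (the username, password, ids and paths are made up):

package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the removed User struct, for illustration only.
type User struct {
    Username    string
    Password    string
    PublicKeys  []string
    HomeDir     string
    Permissions map[string][]string
    Uid         uint32
    Gid         uint32
}

func main() {
    // One illustrative entry; loadUsers expects a JSON array of such records.
    users := []User{{
        Username:    "alice",
        Password:    "secret",
        HomeDir:     "/sftp/alice",
        Permissions: map[string][]string{"/sftp/alice": {"all"}},
        Uid:         1000,
        Gid:         1000,
    }}
    out, _ := json.MarshalIndent(users, "", "  ")
    fmt.Println(string(out))
}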
weed/sftpd/user/homemanager.go
@@ -1,204 +0,0 @@
package user

import (
    "context"
    "fmt"
    "os"
    "path/filepath"
    "strings"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
    "github.com/seaweedfs/seaweedfs/weed/util"
)

// HomeManager handles user home directory operations
type HomeManager struct {
    filerClient FilerClient
}

// FilerClient defines the interface for interacting with the filer
type FilerClient interface {
    WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error
    GetDataCenter() string
    AdjustedUrl(location *filer_pb.Location) string
}

// NewHomeManager creates a new home directory manager
func NewHomeManager(filerClient FilerClient) *HomeManager {
    return &HomeManager{
        filerClient: filerClient,
    }
}

// EnsureHomeDirectory creates the user's home directory if it doesn't exist
func (hm *HomeManager) EnsureHomeDirectory(user *User) error {
    if user.HomeDir == "" {
        return fmt.Errorf("user has no home directory configured")
    }

    glog.V(0).Infof("Ensuring home directory exists for user %s: %s", user.Username, user.HomeDir)

    // Check if home directory exists and create it if needed
    err := hm.createDirectoryIfNotExists(user.HomeDir, user)
    if err != nil {
        return fmt.Errorf("failed to ensure home directory: %v", err)
    }

    // Update user permissions map to include the home directory with full access if not already present
    if user.Permissions == nil {
        user.Permissions = make(map[string][]string)
    }

    // Only add permissions if not already present
    if _, exists := user.Permissions[user.HomeDir]; !exists {
        user.Permissions[user.HomeDir] = []string{"all"}
        glog.V(0).Infof("Added full permissions for user %s to home directory %s",
            user.Username, user.HomeDir)
    }

    return nil
}

// createDirectoryIfNotExists creates a directory path if it doesn't exist
func (hm *HomeManager) createDirectoryIfNotExists(dirPath string, user *User) error {
    // Split the path into components
    components := strings.Split(strings.Trim(dirPath, "/"), "/")
    currentPath := "/"

    for _, component := range components {
        if component == "" {
            continue
        }

        nextPath := filepath.Join(currentPath, component)
        err := hm.createSingleDirectory(nextPath, user)
        if err != nil {
            return err
        }

        currentPath = nextPath
    }

    return nil
}

// createSingleDirectory creates a single directory if it doesn't exist
func (hm *HomeManager) createSingleDirectory(dirPath string, user *User) error {
    var dirExists bool

    err := hm.filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        dir, name := util.FullPath(dirPath).DirAndName()

        // Check if directory exists
        resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
            Directory: dir,
            Name:      name,
        })

        if err != nil || resp.Entry == nil {
            // Directory doesn't exist, create it
            glog.V(0).Infof("Creating directory %s for user %s", dirPath, user.Username)

            err = filer_pb.Mkdir(context.Background(), hm, string(dir), name, func(entry *filer_pb.Entry) {
                // Set appropriate permissions
                entry.Attributes.FileMode = uint32(0700 | os.ModeDir) // rwx------ for user
                entry.Attributes.Uid = user.Uid
                entry.Attributes.Gid = user.Gid

                // Set creation and modification times
                now := time.Now().Unix()
                entry.Attributes.Crtime = now
                entry.Attributes.Mtime = now

                // Add extended attributes
                if entry.Extended == nil {
                    entry.Extended = make(map[string][]byte)
                }
                entry.Extended["creator"] = []byte(user.Username)
                entry.Extended["auto_created"] = []byte("true")
            })

            if err != nil {
                return fmt.Errorf("failed to create directory %s: %v", dirPath, err)
            }
        } else if !resp.Entry.IsDirectory {
            return fmt.Errorf("path %s exists but is not a directory", dirPath)
        } else {
            dirExists = true

            // Update ownership if needed
            if resp.Entry.Attributes.Uid != user.Uid || resp.Entry.Attributes.Gid != user.Gid {
                glog.V(0).Infof("Updating ownership of directory %s for user %s", dirPath, user.Username)

                entry := resp.Entry
                entry.Attributes.Uid = user.Uid
                entry.Attributes.Gid = user.Gid

                _, updateErr := client.UpdateEntry(ctx, &filer_pb.UpdateEntryRequest{
                    Directory: dir,
                    Entry:     entry,
                })

                if updateErr != nil {
                    glog.Warningf("Failed to update directory ownership: %v", updateErr)
                }
            }
        }

        return nil
    })

    if err != nil {
        return err
    }

    if !dirExists {
        // Verify the directory was created
        verifyErr := hm.filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
            ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
            defer cancel()

            dir, name := util.FullPath(dirPath).DirAndName()
            resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
                Directory: dir,
                Name:      name,
            })

            if err != nil || resp.Entry == nil {
                return fmt.Errorf("directory not found after creation")
            }

            if !resp.Entry.IsDirectory {
                return fmt.Errorf("path exists but is not a directory")
            }

            dirExists = true
            return nil
        })

        if verifyErr != nil {
            return fmt.Errorf("failed to verify directory creation: %v", verifyErr)
        }
    }

    return nil
}

// Implement necessary methods to satisfy the filer_pb.FilerClient interface
func (hm *HomeManager) AdjustedUrl(location *filer_pb.Location) string {
    return hm.filerClient.AdjustedUrl(location)
}

func (hm *HomeManager) GetDataCenter() string {
    return hm.filerClient.GetDataCenter()
}

// WithFilerClient delegates to the underlying filer client
func (hm *HomeManager) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error {
    return hm.filerClient.WithFilerClient(streamingMode, fn)
}
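
EnsureHomeDirectory walks the configured home path one component at a time, so each missing level is created with mode 0700 and the user's uid/gid, and existing levels get their ownership corrected. A small sketch of that walk, using a made-up home path:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// Mirrors createDirectoryIfNotExists above: the home path is ensured level by
// level, one filer lookup (and Mkdir if missing) per component.
func main() {
    dirPath := "/sftp/users/alice" // hypothetical home directory
    currentPath := "/"
    for _, component := range strings.Split(strings.Trim(dirPath, "/"), "/") {
        if component == "" {
            continue
        }
        currentPath = filepath.Join(currentPath, component)
        fmt.Println("ensure directory:", currentPath) // /sftp, /sftp/users, /sftp/users/alice
    }
}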
weed/sftpd/utils/lru_cache.go
@@ -0,0 +1,52 @@
package utils

import (
    "container/list"
)

type CacheEntry struct {
    key   int64
    value []byte
}

type LruCache struct {
    capacity int
    ll       *list.List
    cache    map[int64]*list.Element
}

func NewLRUCache(capacity int) *LruCache {
    return &LruCache{
        capacity: capacity,
        ll:       list.New(),
        cache:    make(map[int64]*list.Element),
    }
}

func (c *LruCache) Get(key int64) ([]byte, bool) {
    if ele, ok := c.cache[key]; ok {
        c.ll.MoveToFront(ele)
        return ele.Value.(*CacheEntry).value, true
    }
    return nil, false
}

func (c *LruCache) Put(key int64, value []byte) {
    if ele, ok := c.cache[key]; ok {
        c.ll.MoveToFront(ele)
        ele.Value.(*CacheEntry).value = value
        return
    }

    if c.ll.Len() >= c.capacity {
        oldest := c.ll.Back()
        if oldest != nil {
            c.ll.Remove(oldest)
            delete(c.cache, oldest.Value.(*CacheEntry).key)
        }
    }

    entry := &CacheEntry{key, value}
    ele := c.ll.PushFront(entry)
    c.cache[key] = ele
}
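
A short usage sketch of the cache, using the import path referenced by sftp_file_reader.go above. Capacity bounds memory at roughly capacity times chunk size, which is why the reader pairs a 10-entry cache with 5 MB chunks (about 50 MB per reader):

package main

import (
    "fmt"

    "github.com/seaweedfs/seaweedfs/weed/sftpd/utils"
)

func main() {
    cache := utils.NewLRUCache(2) // keep at most two chunks

    cache.Put(0, []byte("chunk-0"))
    cache.Put(5*1024*1024, []byte("chunk-1"))
    cache.Put(10*1024*1024, []byte("chunk-2")) // evicts key 0, the least recently used

    if _, ok := cache.Get(0); !ok {
        fmt.Println("chunk at offset 0 was evicted")
    }
    if v, ok := cache.Get(5 * 1024 * 1024); ok {
        fmt.Printf("chunk at 5MB still cached: %s\n", v)
    }
}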