From 1defee3d682d86c7e0cbc7db7ebdb9cae872a471 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 1 Jul 2025 01:28:09 -0700 Subject: [PATCH] Add admin component (#6928) * init version * relocate * add s3 bucket link * refactor handlers into weed/admin folder * fix login logout * adding favicon * remove fall back to http get topology * grpc dial option, disk total capacity * show filer count * fix each volume disk usage * add filers to dashboard * adding hosts, volumes, collections * refactor code and menu * remove "refresh" button * fix data for collections * rename cluster hosts into volume servers * add masters, filers * reorder * adding file browser * create folder and upload files * add filer version, created at time * remove mock data * remove fields * fix submenu item highlighting * fix bucket creation * purge files * delete multiple * fix bucket creation * remove region from buckets * add object store with buckets and users * rendering permission * refactor * get bucket objects and size * link to file browser * add file size and count for collections page * paginate the volumes * fix possible SSRF https://github.com/seaweedfs/seaweedfs/pull/6928/checks?check_run_id=45108469801 * Update weed/command/admin.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update weed/command/admin.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * fix build * import * remove filer CLI option * remove filer option * remove CLI options --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- Makefile | 42 +- go.mod | 16 +- go.sum | 31 +- weed/admin/DESIGN.md | 321 ++++ weed/admin/Makefile | 165 ++ weed/admin/NAVIGATION_TEST.md | 96 + weed/admin/README.md | 279 +++ weed/admin/S3_BUCKETS.md | 174 ++ weed/admin/admin.go | 247 +++ weed/admin/dash/admin_server.go | 1146 ++++++++++++ weed/admin/dash/file_browser.go | 350 ++++ weed/admin/dash/handler_admin.go | 373 ++++ weed/admin/dash/handler_auth.go | 128 ++ weed/admin/dash/middleware.go | 27 + weed/admin/handlers/auth.go | 45 + weed/admin/handlers/cluster_handlers.go | 202 +++ weed/admin/handlers/file_browser_handlers.go | 447 +++++ weed/admin/handlers/handlers.go | 320 ++++ weed/admin/static/css/admin.css | 217 +++ weed/admin/static/favicon.ico | Bin 0 -> 5558 bytes weed/admin/static/js/admin.js | 1576 +++++++++++++++++ weed/admin/view/app/admin.templ | 351 ++++ weed/admin/view/app/admin_templ.go | 555 ++++++ weed/admin/view/app/cluster_collections.templ | 360 ++++ .../view/app/cluster_collections_templ.go | 346 ++++ weed/admin/view/app/cluster_filers.templ | 163 ++ weed/admin/view/app/cluster_filers_templ.go | 252 +++ weed/admin/view/app/cluster_masters.templ | 209 +++ weed/admin/view/app/cluster_masters_templ.go | 247 +++ .../view/app/cluster_volume_servers.templ | 221 +++ .../view/app/cluster_volume_servers_templ.go | 306 ++++ weed/admin/view/app/cluster_volumes.templ | 414 +++++ weed/admin/view/app/cluster_volumes_templ.go | 661 +++++++ weed/admin/view/app/file_browser.templ | 438 +++++ weed/admin/view/app/file_browser_templ.go | 607 +++++++ weed/admin/view/app/object_store_users.templ | 214 +++ .../view/app/object_store_users_templ.go | 237 +++ weed/admin/view/app/s3_buckets.templ | 302 ++++ weed/admin/view/app/s3_buckets_templ.go | 277 +++ weed/admin/view/app/template_helpers.go | 84 + weed/admin/view/layout/layout.templ | 263 +++ weed/admin/view/layout/layout_templ.go | 163 ++ weed/command/admin.go | 236 +++ weed/command/command.go | 1 + 44 files changed, 13095 insertions(+), 14 
deletions(-) create mode 100644 weed/admin/DESIGN.md create mode 100644 weed/admin/Makefile create mode 100644 weed/admin/NAVIGATION_TEST.md create mode 100644 weed/admin/README.md create mode 100644 weed/admin/S3_BUCKETS.md create mode 100644 weed/admin/admin.go create mode 100644 weed/admin/dash/admin_server.go create mode 100644 weed/admin/dash/file_browser.go create mode 100644 weed/admin/dash/handler_admin.go create mode 100644 weed/admin/dash/handler_auth.go create mode 100644 weed/admin/dash/middleware.go create mode 100644 weed/admin/handlers/auth.go create mode 100644 weed/admin/handlers/cluster_handlers.go create mode 100644 weed/admin/handlers/file_browser_handlers.go create mode 100644 weed/admin/handlers/handlers.go create mode 100644 weed/admin/static/css/admin.css create mode 100644 weed/admin/static/favicon.ico create mode 100644 weed/admin/static/js/admin.js create mode 100644 weed/admin/view/app/admin.templ create mode 100644 weed/admin/view/app/admin_templ.go create mode 100644 weed/admin/view/app/cluster_collections.templ create mode 100644 weed/admin/view/app/cluster_collections_templ.go create mode 100644 weed/admin/view/app/cluster_filers.templ create mode 100644 weed/admin/view/app/cluster_filers_templ.go create mode 100644 weed/admin/view/app/cluster_masters.templ create mode 100644 weed/admin/view/app/cluster_masters_templ.go create mode 100644 weed/admin/view/app/cluster_volume_servers.templ create mode 100644 weed/admin/view/app/cluster_volume_servers_templ.go create mode 100644 weed/admin/view/app/cluster_volumes.templ create mode 100644 weed/admin/view/app/cluster_volumes_templ.go create mode 100644 weed/admin/view/app/file_browser.templ create mode 100644 weed/admin/view/app/file_browser_templ.go create mode 100644 weed/admin/view/app/object_store_users.templ create mode 100644 weed/admin/view/app/object_store_users_templ.go create mode 100644 weed/admin/view/app/s3_buckets.templ create mode 100644 weed/admin/view/app/s3_buckets_templ.go create mode 100644 weed/admin/view/app/template_helpers.go create mode 100644 weed/admin/view/layout/layout.templ create mode 100644 weed/admin/view/layout/layout_templ.go create mode 100644 weed/command/admin.go diff --git a/Makefile b/Makefile index 509f23e85..09db0766b 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,20 @@ -.PHONY: test +.PHONY: test admin-generate admin-build admin-clean admin-dev admin-run admin-test admin-fmt admin-help BINARY = weed +ADMIN_DIR = weed/admin SOURCE_DIR = . debug ?= 0 all: install -install: +install: admin-generate cd weed; go install warp_install: go install github.com/minio/warp@v0.7.6 -full_install: +full_install: admin-generate cd weed; go install -tags "elastic gocdk sqlite ydb tarantool tikv rclone" server: install @@ -33,5 +34,38 @@ benchmark: install warp_install benchmark_with_pprof: debug = 1 benchmark_with_pprof: benchmark -test: +test: admin-generate cd weed; go test -tags "elastic gocdk sqlite ydb tarantool tikv rclone" -v ./... + +# Admin component targets +admin-generate: + @echo "Generating admin component templates..." + @cd $(ADMIN_DIR) && $(MAKE) generate + +admin-build: admin-generate + @echo "Building admin component..." + @cd $(ADMIN_DIR) && $(MAKE) build + +admin-clean: + @echo "Cleaning admin component..." + @cd $(ADMIN_DIR) && $(MAKE) clean + +admin-dev: + @echo "Starting admin development server..." + @cd $(ADMIN_DIR) && $(MAKE) dev + +admin-run: + @echo "Running admin server..." 
+ @cd $(ADMIN_DIR) && $(MAKE) run + +admin-test: + @echo "Testing admin component..." + @cd $(ADMIN_DIR) && $(MAKE) test + +admin-fmt: + @echo "Formatting admin component..." + @cd $(ADMIN_DIR) && $(MAKE) fmt + +admin-help: + @echo "Admin component help..." + @cd $(ADMIN_DIR) && $(MAKE) help diff --git a/go.mod b/go.mod index 93eedec7f..d660239a4 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/gocql/gocql v1.7.0 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v1.0.0 // indirect github.com/google/btree v1.1.3 github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 // indirect @@ -123,6 +123,7 @@ require ( require ( github.com/Jille/raft-grpc-transport v1.6.1 + github.com/a-h/templ v0.3.906 github.com/arangodb/go-driver v1.6.6 github.com/armon/go-metrics v0.4.1 github.com/aws/aws-sdk-go-v2 v1.36.5 @@ -132,6 +133,8 @@ require ( github.com/cognusion/imaging v1.0.2 github.com/fluent/fluent-logger-golang v1.10.0 github.com/getsentry/sentry-go v0.33.0 + github.com/gin-contrib/sessions v1.0.4 + github.com/gin-gonic/gin v1.10.1 github.com/golang-jwt/jwt/v5 v5.2.2 github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50 github.com/hanwen/go-fuse/v2 v2.8.0 @@ -213,12 +216,15 @@ require ( github.com/bradenaw/juniper v0.15.3 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/buengese/sgzip v0.1.1 // indirect + github.com/bytedance/sonic v1.13.2 // indirect + github.com/bytedance/sonic/loader v0.2.4 // indirect github.com/calebcase/tmpfile v1.0.3 // indirect github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/cloudinary/cloudinary-go/v2 v2.10.0 // indirect github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect + github.com/cloudwego/base64x v0.1.5 // indirect github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect github.com/colinmarc/hdfs/v2 v2.4.0 // indirect github.com/creasty/defaults v1.8.0 // indirect @@ -238,6 +244,7 @@ require ( github.com/flynn/noise v1.1.0 // indirect github.com/gabriel-vasile/mimetype v1.4.9 // indirect github.com/geoffgarside/ber v1.2.0 // indirect + github.com/gin-contrib/sse v1.0.0 // indirect github.com/go-chi/chi/v5 v5.2.2 // indirect github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect @@ -251,12 +258,16 @@ require ( github.com/go-playground/validator/v10 v10.26.0 // indirect github.com/go-resty/resty/v2 v2.16.5 // indirect github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/gorilla/context v1.1.2 // indirect github.com/gorilla/schema v1.4.1 // indirect + github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/sessions v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -341,7 +352,9 @@ require ( github.com/tinylib/msgp v1.3.0 // indirect 
github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/twmb/murmur3 v1.1.3 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect github.com/unknwon/goconfig v1.0.0 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect @@ -367,6 +380,7 @@ require ( go.opentelemetry.io/otel/trace v1.36.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + golang.org/x/arch v0.16.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/time v0.12.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect diff --git a/go.sum b/go.sum index 26792b28d..8b39dbd16 100644 --- a/go.sum +++ b/go.sum @@ -622,6 +622,8 @@ github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= +github.com/a-h/templ v0.3.906 h1:ZUThc8Q9n04UATaCwaG60pB1AqbulLmYEAMnWV63svg= +github.com/a-h/templ v0.3.906/go.mod h1:FFAu4dI//ESmEN7PQkJ7E7QfnSEMdcnu7QrAY8Dn334= github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3/go.mod h1:XaUnRxSCYgL3kkgX0QHIV0D+znljPIDImxlv2kbGv0Y= github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= @@ -725,6 +727,7 @@ github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgIS github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo= @@ -760,6 +763,7 @@ github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqL github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc/go.mod h1:uvR42Hb/t52HQd7x5/ZLzZEK8oihrFpgnodIJ1vte2E= github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -888,10 +892,12 @@ github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNe github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= 
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sessions v1.0.4 h1:ha6CNdpYiTOK/hTp05miJLbpTSNfOnFg5Jm2kbcqy8U= +github.com/gin-contrib/sessions v1.0.4/go.mod h1:ccmkrb2z6iU2osiAHZG3x3J4suJK+OU27oqzlWOqQgs= github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= -github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= -github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= +github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618= github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68= @@ -1018,10 +1024,11 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= -github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s= +github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -1113,14 +1120,18 @@ github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/context v1.1.2 h1:WRkNAv2uoa03QNIc1A6u4O7DAGMUVoopZhkiXWA2V1o= +github.com/gorilla/context v1.1.2/go.mod h1:KDPwT9i/MeWHiLl90fuTgrt4/wPcv75vFAZLaOOcbxM= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.1 
h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= +github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/sessions v1.4.0 h1:kpIYOp/oi6MG/p5PgxApU8srsSw9tuFbt46Lt7auzqQ= +github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -1245,6 +1256,7 @@ github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2 github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 h1:CjEMN21Xkr9+zwPmZPaJJw+apzVbjGL5uK/6g9Q2jGU= @@ -1746,8 +1758,8 @@ gocloud.dev/pubsub/natspubsub v0.42.0 h1:sjz9PNIT28us6UVctyZZVDlBoGfUXSqvBX5rcT3 gocloud.dev/pubsub/natspubsub v0.42.0/go.mod h1:Y25oPmk9vWg1pathkY85+u+9zszMGhI+xhdFUSWnins= gocloud.dev/pubsub/rabbitpubsub v0.41.0 h1:RutvHbacZxlFr0t3wlr+kz63j53UOfHY3PJR8NKN1EI= gocloud.dev/pubsub/rabbitpubsub v0.41.0/go.mod h1:s7oQXOlQ2FOj8XmYMv5Ocgs1t+8hIXfsKaWGgECM9SQ= -golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4= -golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.16.0 h1:foMtLTdyOmIniqWCHjY6+JxuC54XP1fDwx4N0ASyW+U= +golang.org/x/arch v0.16.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -2576,6 +2588,7 @@ modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/weed/admin/DESIGN.md b/weed/admin/DESIGN.md new file mode 100644 index 000000000..3823c15d6 --- /dev/null +++ b/weed/admin/DESIGN.md @@ -0,0 +1,321 @@ +# SeaweedFS Admin Interface Web Component Design + +## Overview + +The SeaweedFS Admin 
Interface is a modern web-based administration interface for SeaweedFS clusters, following the **Gin + Templ + HTMX** architecture pattern. It provides comprehensive cluster management, monitoring, and maintenance capabilities through an intuitive web interface. + +## Architecture + +### Technology Stack + +- **Backend Framework**: Gin (Go HTTP web framework) +- **Template Engine**: Templ (Type-safe Go templates) +- **Frontend Enhancement**: HTMX (Dynamic interactions without JavaScript frameworks) +- **CSS Framework**: Bootstrap 5 (Modern responsive design) +- **Icons**: Font Awesome 6 (Comprehensive icon library) +- **Authentication**: Session-based with configurable credentials + +### Directory Structure + +``` +weed/admin/ +├── admin.go # Main entry point & router setup +├── dash/ # Core admin logic +│ ├── admin_server.go # Server struct & cluster operations +│ ├── handler_auth.go # Authentication handlers +│ ├── handler_admin.go # Main admin handlers +│ ├── middleware.go # Authentication middleware +│ └── ... # Additional handlers +├── view/ # Template components +│ ├── layout/ +│ │ └── layout.templ # Base layout & login form +│ └── app/ +│ ├── admin.templ # Admin page template +│ └── template_helpers.go # Formatting utilities +├── static/ # Static assets +│ ├── css/ +│ │ └── admin.css # Custom styles +│ └── js/ +│ └── admin.js # Interactive functionality +└── templates/ # Embedded templates +``` + +## Core Features + +### 1. **Cluster Management** + +#### Topology Visualization +- **Data Center/Rack/Node Hierarchy**: Visual representation of cluster topology +- **Real-time Status Monitoring**: Live status updates for all cluster components +- **Capacity Planning**: Volume utilization and capacity tracking +- **Health Assessment**: Automated health scoring and alerts + +#### Master Node Management +- **Leader/Follower Status**: Clear indication of Raft leadership +- **Master Configuration**: View and modify master settings +- **Cluster Membership**: Add/remove master nodes +- **Heartbeat Monitoring**: Track master node availability + +#### Volume Server Operations +- **Server Registration**: Automatic detection of new volume servers +- **Disk Usage Monitoring**: Real-time disk space and volume tracking +- **Performance Metrics**: I/O statistics and throughput monitoring +- **Maintenance Mode**: Graceful server shutdown and maintenance + +### 2. **Volume Management** + +#### Volume Operations +- **Volume Creation**: Create new volumes with replication settings +- **Volume Listing**: Comprehensive volume inventory with search/filter +- **Volume Details**: Detailed information per volume (files, size, replicas) +- **Volume Migration**: Move volumes between servers +- **Volume Deletion**: Safe volume removal with confirmation + +#### Storage Operations +- **Volume Growing**: Automatic volume expansion based on policies +- **Vacuum Operations**: Reclaim deleted file space +- **Compaction**: Optimize volume storage efficiency +- **Rebalancing**: Distribute volumes evenly across servers + +### 3. 
**File Management** + +#### File Browser +- **Directory Navigation**: Browse filer directories with breadcrumbs +- **File Operations**: Upload, download, delete, rename files +- **Batch Operations**: Multi-file operations with progress tracking +- **Metadata Display**: File attributes, timestamps, permissions +- **Search Functionality**: Find files by name, type, or content + +#### Storage Analytics +- **Usage Statistics**: File count, size distribution, growth trends +- **Access Patterns**: Popular files and access frequency +- **Storage Efficiency**: Compression ratios and duplicate detection + +### 4. **Monitoring & Metrics** + +#### Real-time Dashboards +- **System Overview**: Cluster health at a glance +- **Performance Metrics**: Throughput, latency, and error rates +- **Resource Utilization**: CPU, memory, disk, and network usage +- **Historical Trends**: Long-term performance analysis + +#### Alerting System +- **Threshold Monitoring**: Configurable alerts for key metrics +- **Health Checks**: Automated health assessment and scoring +- **Notification Channels**: Email, webhook, and dashboard notifications + +### 5. **Configuration Management** + +#### Cluster Configuration +- **Master Settings**: Replication, security, and operational parameters +- **Volume Server Config**: Storage paths, limits, and performance settings +- **Filer Configuration**: Metadata storage and caching options +- **Security Settings**: Authentication, authorization, and encryption + +#### Backup & Restore +- **Configuration Backup**: Export cluster configuration +- **Configuration Restore**: Import and apply saved configurations +- **Version Control**: Track configuration changes over time + +### 6. **System Maintenance** + +#### Maintenance Operations +- **Garbage Collection**: Clean up orphaned files and metadata +- **Volume Repair**: Fix corrupted or inconsistent volumes +- **Cluster Validation**: Verify cluster integrity and consistency +- **Performance Tuning**: Optimize cluster performance parameters + +#### Log Management +- **Log Aggregation**: Centralized logging from all cluster components +- **Log Analysis**: Search, filter, and analyze system logs +- **Error Tracking**: Identify and track system errors and warnings +- **Log Export**: Download logs for external analysis + +## User Interface Design + +### Layout Components + +#### Header Navigation +- **Cluster Status Indicator**: Quick health overview +- **User Information**: Current user and session details +- **Quick Actions**: Frequently used operations +- **Search Bar**: Global search across cluster resources + +#### Sidebar Navigation +- **Cluster Section**: Topology, status, and management +- **Management Section**: Files, volumes, and operations +- **System Section**: Configuration, logs, and maintenance +- **Contextual Actions**: Dynamic actions based on current view + +#### Main Content Area +- **Dashboard Cards**: Key metrics and status summaries +- **Data Tables**: Sortable, filterable resource listings +- **Interactive Charts**: Real-time metrics visualization +- **Action Panels**: Operation forms and bulk actions + +### Responsive Design +- **Mobile Responsive**: Optimized for tablets and mobile devices +- **Progressive Enhancement**: Works with JavaScript disabled +- **Accessibility**: WCAG 2.1 compliant interface +- **Theme Support**: Light/dark mode support + +## Security Features + +### Authentication & Authorization +- **Configurable Authentication**: Optional password protection +- **Session Management**: Secure session handling 
with timeouts +- **Role-based Access**: Different permission levels for users +- **Audit Logging**: Track all administrative actions + +### Security Hardening +- **HTTPS Support**: TLS encryption for all communications +- **CSRF Protection**: Cross-site request forgery prevention +- **Input Validation**: Comprehensive input sanitization +- **Rate Limiting**: Prevent abuse and DoS attacks + +## API Design + +### RESTful Endpoints +```go +// Public endpoints +GET /health # Health check +GET /login # Login form +POST /login # Process login +GET /logout # Logout + +// Protected endpoints +GET /admin # Main admin interface +GET /overview # Cluster overview API + +// Cluster management +GET /cluster # Cluster topology view +GET /cluster/topology # Topology data API +GET /cluster/status # Cluster status API +POST /cluster/grow # Grow volumes +POST /cluster/vacuum # Vacuum operation +POST /cluster/rebalance # Rebalance cluster + +// Volume management +GET /volumes # Volumes list page +GET /volumes/list # Volumes data API +GET /volumes/details/:id # Volume details +POST /volumes/create # Create volume +DELETE /volumes/delete/:id # Delete volume + +// File management +GET /filer # File browser page +GET /filer/browser # File browser interface +GET /filer/browser/api/* # File operations API +POST /filer/upload # File upload +DELETE /filer/delete # File deletion + +// Monitoring +GET /metrics # Metrics dashboard +GET /metrics/data # Metrics data API +GET /metrics/realtime # Real-time metrics +GET /logs # Logs viewer +GET /logs/download/:type # Download logs + +// Configuration +GET /config # Configuration page +GET /config/current # Current configuration +POST /config/update # Update configuration +GET /config/backup # Backup configuration + +// Maintenance +GET /maintenance # Maintenance page +POST /maintenance/gc # Garbage collection +POST /maintenance/compact # Volume compaction +GET /maintenance/status # Maintenance status +``` + +## Development Guidelines + +### Code Organization +- **Handler Separation**: Separate files for different functional areas +- **Type Safety**: Use strongly typed structures for all data +- **Error Handling**: Comprehensive error handling and user feedback +- **Testing**: Unit and integration tests for all components + +### Performance Considerations +- **Caching Strategy**: Intelligent caching of cluster topology and metrics +- **Lazy Loading**: Load data on demand to improve responsiveness +- **Batch Operations**: Efficient bulk operations for large datasets +- **Compression**: Gzip compression for API responses + +### Monitoring Integration +- **Metrics Export**: Prometheus-compatible metrics endpoint +- **Health Checks**: Kubernetes-style health and readiness probes +- **Distributed Tracing**: OpenTelemetry integration for request tracing +- **Structured Logging**: JSON logging for better observability + +## Deployment Options + +### Standalone Deployment +```bash +# Start dashboard server +./weed dashboard -port=9999 \ + -masters="master1:9333,master2:9333" \ + -filer="filer:8888" \ + -adminUser="admin" \ + -adminPassword="secretpassword" +``` + +### Docker Deployment +```yaml +# docker-compose.yml +version: '3.8' +services: + dashboard: + image: seaweedfs:latest + command: dashboard -port=9999 -masters=master:9333 -filer=filer:8888 + ports: + - "9999:9999" + environment: + - ADMIN_USER=admin + - ADMIN_PASSWORD=secretpassword +``` + +### Kubernetes Deployment +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: seaweedfs-dashboard +spec: + replicas: 1 
+ selector: + matchLabels: + app: seaweedfs-dashboard + template: + metadata: + labels: + app: seaweedfs-dashboard + spec: + containers: + - name: dashboard + image: seaweedfs:latest + command: ["weed", "dashboard"] + args: + - "-port=9999" + - "-masters=seaweedfs-master:9333" + - "-filer=seaweedfs-filer:8888" + ports: + - containerPort: 9999 +``` + +## Future Enhancements + +### Advanced Features +- **Multi-cluster Management**: Manage multiple SeaweedFS clusters +- **Advanced Analytics**: Machine learning-powered insights +- **Custom Dashboards**: User-configurable dashboard layouts +- **API Integration**: Webhook integration with external systems + +### Enterprise Features +- **SSO Integration**: LDAP, OAuth, and SAML authentication +- **Advanced RBAC**: Fine-grained permission system +- **Audit Compliance**: SOX, HIPAA, and PCI compliance features +- **High Availability**: Multi-instance dashboard deployment + +This design provides a comprehensive, modern, and scalable web interface for SeaweedFS administration, following industry best practices and providing an excellent user experience for cluster operators and administrators. \ No newline at end of file diff --git a/weed/admin/Makefile b/weed/admin/Makefile new file mode 100644 index 000000000..b79ddc1ab --- /dev/null +++ b/weed/admin/Makefile @@ -0,0 +1,165 @@ +# SeaweedFS Admin Component Makefile + +# Variables +ADMIN_DIR := . +VIEW_DIR := $(ADMIN_DIR)/view +STATIC_DIR := $(ADMIN_DIR)/static +TEMPL_FILES := $(shell find $(VIEW_DIR) -name "*.templ") +TEMPL_GO_FILES := $(TEMPL_FILES:.templ=_templ.go) +GO_FILES := $(shell find $(ADMIN_DIR) -name "*.go" -not -name "*_templ.go") +BUILD_DIR := ../.. +WEED_BINARY := $(BUILD_DIR)/weed + +# Default target +.PHONY: all +all: build + +# Install templ if not present +.PHONY: install-templ +install-templ: + @which templ > /dev/null || (echo "Installing templ..." && go install github.com/a-h/templ/cmd/templ@latest) + +# Generate templ files +.PHONY: generate +generate: install-templ + @echo "Generating templ files..." + @templ generate + @echo "Generated: $(TEMPL_GO_FILES)" + +# Clean generated files +.PHONY: clean-templ +clean-templ: + @echo "Cleaning generated templ files..." + @find $(VIEW_DIR) -name "*_templ.go" -delete + @echo "Cleaned templ files" + +# Watch for changes and regenerate +.PHONY: watch +watch: install-templ + @echo "Watching for templ file changes..." + @templ generate --watch + +# Build the main weed binary with admin component +.PHONY: build +build: generate + @echo "Building weed binary with admin component..." + @cd $(BUILD_DIR) && go build -o weed ./weed + @echo "Built: $(BUILD_DIR)/weed" + +# Test the admin component +.PHONY: test +test: generate + @echo "Running admin component tests..." + @go test ./... + +# Run the admin server via weed command +.PHONY: run +run: build + @echo "Starting admin server via weed command..." + @cd $(BUILD_DIR) && ./weed admin + +# Development server with auto-reload +.PHONY: dev +dev: generate + @echo "Starting development server with auto-reload..." + @echo "Note: You'll need to manually restart the server when Go files change" + @cd $(BUILD_DIR) && ./weed admin -port=23647 & + @$(MAKE) watch + +# Lint the code +.PHONY: lint +lint: + @echo "Linting admin component..." + @golangci-lint run ./... + +# Format the code +.PHONY: fmt +fmt: + @echo "Formatting Go code..." + @go fmt ./... + @echo "Formatting templ files..." 
+ @templ fmt $(VIEW_DIR) + +# Validate static files exist +.PHONY: validate-static +validate-static: + @echo "Validating static files..." + @test -f $(STATIC_DIR)/css/admin.css || (echo "Missing: admin.css" && exit 1) + @test -f $(STATIC_DIR)/js/admin.js || (echo "Missing: admin.js" && exit 1) + @echo "Static files validated" + +# Check dependencies +.PHONY: deps +deps: + @echo "Checking dependencies..." + @go mod tidy + @go mod verify + +# Clean all build artifacts +.PHONY: clean +clean: clean-templ + @echo "Cleaning build artifacts..." + @rm -f $(BUILD_DIR)/weed 2>/dev/null || true + @echo "Cleaned build artifacts" + +# Install dependencies +.PHONY: install-deps +install-deps: + @echo "Installing Go dependencies..." + @go mod download + @$(MAKE) install-templ + +# Production build +.PHONY: build-prod +build-prod: clean generate validate-static + @echo "Building production binary..." + @cd $(BUILD_DIR) && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o weed-linux-amd64 ./weed + @echo "Built production binary: $(BUILD_DIR)/weed-linux-amd64" + +# Docker build (if needed) +.PHONY: docker-build +docker-build: generate + @echo "Building Docker image with admin component..." + @cd $(BUILD_DIR) && docker build -t seaweedfs/seaweedfs:latest . + +# Help target +.PHONY: help +help: + @echo "SeaweedFS Admin Component Makefile" + @echo "" + @echo "Available targets:" + @echo " all - Build the weed binary with admin component (default)" + @echo " generate - Generate templ files from templates" + @echo " build - Build weed binary with admin component" + @echo " build-prod - Build production binary" + @echo " run - Run admin server via weed command" + @echo " dev - Start development server with template watching" + @echo " test - Run tests" + @echo " watch - Watch for template changes and regenerate" + @echo " clean - Clean all build artifacts" + @echo " clean-templ - Clean generated template files" + @echo " fmt - Format Go and templ code" + @echo " lint - Lint the code" + @echo " deps - Check and tidy dependencies" + @echo " install-deps - Install all dependencies" + @echo " install-templ - Install templ compiler" + @echo " validate-static - Validate static files exist" + @echo " docker-build - Build Docker image" + @echo " help - Show this help message" + @echo "" + @echo "Examples:" + @echo " make generate # Generate templates" + @echo " make build # Build weed binary" + @echo " make run # Start admin server" + @echo " make dev # Development mode with auto-reload" + +# Make sure generated files are up to date before building +$(WEED_BINARY): $(TEMPL_GO_FILES) $(GO_FILES) + @$(MAKE) build + +# Auto-generate templ files when .templ files change +%_templ.go: %.templ + @echo "Regenerating $@ from $<" + @templ generate + +.PHONY: $(TEMPL_GO_FILES) \ No newline at end of file diff --git a/weed/admin/NAVIGATION_TEST.md b/weed/admin/NAVIGATION_TEST.md new file mode 100644 index 000000000..1668ce447 --- /dev/null +++ b/weed/admin/NAVIGATION_TEST.md @@ -0,0 +1,96 @@ +# Navigation Menu Test + +## Quick Test Guide + +To verify that the S3 Buckets link appears in the navigation menu: + +### 1. Start the Admin Server +```bash +# Start with minimal setup +weed admin -port=23646 -masters=localhost:9333 -filer=localhost:8888 + +# Or with dummy values for testing UI only +weed admin -port=23646 -masters=dummy:9333 -filer=dummy:8888 +``` + +### 2. Open Browser +Navigate to: `http://localhost:23646` + +### 3. Check Navigation Menu +Look for the sidebar navigation on the left side. 
You should see: + +**CLUSTER Section:** +- Admin +- Cluster +- Volumes + +**MANAGEMENT Section:** +- **S3 Buckets** ← This should be visible! +- File Browser +- Metrics +- Logs + +**SYSTEM Section:** +- Configuration +- Maintenance + +### 4. Test S3 Buckets Link +- Click on "S3 Buckets" in the sidebar +- Should navigate to `/s3/buckets` +- Should show the S3 bucket management page +- The "S3 Buckets" menu item should be highlighted as active + +### 5. Expected Behavior +- Menu item has cube icon: `📦 S3 Buckets` +- Link points to `/s3/buckets` +- Active state highlighting works +- Page loads S3 bucket management interface + +## Troubleshooting + +If the S3 Buckets link is not visible: + +1. **Check Template Generation:** + ```bash + cd weed/admin + templ generate + ``` + +2. **Rebuild Binary:** + ```bash + cd ../.. + go build -o weed weed/weed.go + ``` + +3. **Check Browser Console:** + - Open Developer Tools (F12) + - Look for any JavaScript errors + - Check Network tab for failed requests + +4. **Verify File Structure:** + ```bash + ls -la weed/admin/view/layout/layout_templ.go + ``` + +5. **Check Server Logs:** + - Look for any error messages when starting admin server + - Check for template compilation errors + +## Files Modified + +- `weed/admin/view/layout/layout.templ` - Added S3 Buckets menu item +- `weed/admin/static/js/admin.js` - Updated navigation highlighting +- `weed/command/admin.go` - Added S3 routes + +## Expected Navigation Structure + +```html + +``` \ No newline at end of file diff --git a/weed/admin/README.md b/weed/admin/README.md new file mode 100644 index 000000000..8364ebc2f --- /dev/null +++ b/weed/admin/README.md @@ -0,0 +1,279 @@ +# SeaweedFS Admin Component + +A modern web-based administration interface for SeaweedFS clusters built with Go, Gin, Templ, and Bootstrap. 
+ +## Features + +- **Dashboard**: Real-time cluster status and metrics +- **Master Management**: Monitor master nodes and leadership status +- **Volume Server Management**: View volume servers, capacity, and health +- **S3 Bucket Management**: Create, delete, and manage S3 buckets with web interface +- **System Health**: Overall cluster health monitoring +- **Responsive Design**: Bootstrap-based UI that works on all devices +- **Authentication**: Optional user authentication with sessions +- **TLS Support**: HTTPS support for production deployments + +## Building + +### Using the Admin Makefile + +The admin component has its own Makefile for development and building: + +```bash +# Navigate to admin directory +cd weed/admin + +# View all available targets +make help + +# Generate templates and build +make build + +# Development mode with template watching +make dev + +# Run the admin server +make run + +# Clean build artifacts +make clean +``` + +### Using the Root Makefile + +The root SeaweedFS Makefile automatically integrates the admin component: + +```bash +# From the root directory +make install # Builds weed with admin component +make full_install # Full build with all tags +make test # Runs tests including admin component + +# Admin-specific targets from root +make admin-generate # Generate admin templates +make admin-build # Build admin component +make admin-run # Run admin server +make admin-dev # Development mode +make admin-clean # Clean admin artifacts +``` + +### Manual Building + +If you prefer to build manually: + +```bash +# Install templ compiler +go install github.com/a-h/templ/cmd/templ@latest + +# Generate templates +templ generate + +# Build the main weed binary +cd ../../../ +go build -o weed ./weed +``` + +## Development + +### Template Development + +The admin interface uses [Templ](https://templ.guide/) for type-safe HTML templates: + +```bash +# Watch for template changes and auto-regenerate +make watch + +# Or manually generate templates +make generate + +# Format templates +make fmt +``` + +### File Structure + +``` +weed/admin/ +├── Makefile # Admin-specific build tasks +├── README.md # This file +├── S3_BUCKETS.md # S3 bucket management documentation +├── admin.go # Main application entry point +├── dash/ # Server and handler logic +│ ├── admin_server.go # HTTP server setup +│ ├── handler_admin.go # Admin dashboard handlers +│ ├── handler_auth.go # Authentication handlers +│ └── middleware.go # HTTP middleware +├── static/ # Static assets +│ ├── css/admin.css # Admin-specific styles +│ └── js/admin.js # Admin-specific JavaScript +└── view/ # Templates + ├── app/ # Application templates + │ ├── admin.templ # Main dashboard template + │ ├── s3_buckets.templ # S3 bucket management template + │ └── *_templ.go # Generated Go code + └── layout/ # Layout templates + ├── layout.templ # Base layout template + └── layout_templ.go # Generated Go code +``` + +### S3 Bucket Management + +The admin interface includes comprehensive S3 bucket management capabilities. 
See [S3_BUCKETS.md](S3_BUCKETS.md) for detailed documentation on: + +- Creating and deleting S3 buckets +- Viewing bucket contents and metadata +- Managing bucket permissions and settings +- API endpoints for programmatic access + +## Usage + +### Basic Usage + +```bash +# Start admin interface on default port (23646) +weed admin + +# Start with custom configuration +weed admin -port=8080 -masters="master1:9333,master2:9333" -filer="filer:8888" + +# Start with authentication +weed admin -adminUser=admin -adminPassword=secret123 + +# Start with HTTPS +weed admin -port=443 -tlsCert=/path/to/cert.pem -tlsKey=/path/to/key.pem +``` + +### Configuration Options + +| Option | Default | Description | +|--------|---------|-------------| +| `-port` | 23646 | Admin server port | +| `-masters` | localhost:9333 | Comma-separated master servers | +| `-adminUser` | admin | Admin username (if auth enabled) | +| `-adminPassword` | "" | Admin password (empty = no auth) | +| `-tlsCert` | "" | Path to TLS certificate | +| `-tlsKey` | "" | Path to TLS private key | + +### Docker Usage + +```bash +# Build Docker image with admin component +make docker-build + +# Run with Docker +docker run -p 23646:23646 seaweedfs/seaweedfs:latest admin -masters=host.docker.internal:9333 +``` + +## Development Workflow + +### Quick Start + +```bash +# Clone and setup +git clone +cd seaweedfs/weed/admin + +# Install dependencies and build +make install-deps +make build + +# Start development server +make dev +``` + +### Making Changes + +1. **Template Changes**: Edit `.templ` files in `view/` + - Templates auto-regenerate in development mode + - Use `make generate` to manually regenerate + +2. **Go Code Changes**: Edit `.go` files + - Restart the server to see changes + - Use `make build` to rebuild + +3. **Static Assets**: Edit files in `static/` + - Changes are served immediately + +### Testing + +```bash +# Run admin component tests +make test + +# Run from root directory +make admin-test + +# Lint code +make lint + +# Format code +make fmt +``` + +## Production Deployment + +### Security Considerations + +1. **Authentication**: Always set `adminPassword` in production +2. **HTTPS**: Use TLS certificates for encrypted connections +3. **Firewall**: Restrict admin interface access to authorized networks + +### Example Production Setup + +```bash +# Production deployment with security +weed admin \ + -port=443 \ + -masters="master1:9333,master2:9333,master3:9333" \ + -adminUser=admin \ + -adminPassword=your-secure-password \ + -tlsCert=/etc/ssl/certs/admin.crt \ + -tlsKey=/etc/ssl/private/admin.key +``` + +### Monitoring + +The admin interface provides endpoints for monitoring: + +- `GET /health` - Health check endpoint +- `GET /metrics` - Prometheus metrics (if enabled) +- `GET /api/status` - JSON status information + +## Troubleshooting + +### Common Issues + +1. **Templates not found**: Run `make generate` to create template files +2. **Build errors**: Ensure `templ` is installed with `make install-templ` +3. **Static files not loading**: Check that `static/` directory exists and has proper files +4. **Connection errors**: Verify master and filer addresses are correct + +### Debug Mode + +```bash +# Enable debug logging +weed -v=2 admin + +# Check generated templates +ls -la view/app/*_templ.go view/layout/*_templ.go +``` + +## Contributing + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Run tests: `make test` +5. Format code: `make fmt` +6. 
Submit a pull request + +## Architecture + +The admin component follows a clean architecture: + +- **Presentation Layer**: Templ templates + Bootstrap CSS +- **HTTP Layer**: Gin router with middleware +- **Business Logic**: Handler functions in `dash/` package +- **Data Layer**: Communicates with SeaweedFS masters and filers + +This separation makes the code maintainable and testable. \ No newline at end of file diff --git a/weed/admin/S3_BUCKETS.md b/weed/admin/S3_BUCKETS.md new file mode 100644 index 000000000..d147a33e3 --- /dev/null +++ b/weed/admin/S3_BUCKETS.md @@ -0,0 +1,174 @@ +# S3 Bucket Management + +The SeaweedFS Admin Interface now includes comprehensive S3 bucket management capabilities. + +## Features + +### Bucket Overview +- **Dashboard View**: List all S3 buckets with summary statistics +- **Bucket Statistics**: Total buckets, storage usage, object counts +- **Status Monitoring**: Real-time bucket status and health indicators + +### Bucket Operations +- **Create Buckets**: Create new S3 buckets +- **Delete Buckets**: Remove buckets and all their contents (with confirmation) +- **View Details**: Browse bucket contents and object listings +- **Export Data**: Export bucket lists to CSV format + +### Bucket Information +Each bucket displays: +- **Name**: Bucket identifier +- **Created Date**: When the bucket was created +- **Object Count**: Number of objects stored +- **Total Size**: Storage space used (formatted in KB/MB/GB/TB) +- **Region**: Configured AWS region +- **Status**: Current operational status + +## Usage + +### Accessing S3 Bucket Management + +1. Start the admin server: + ```bash + weed admin -port=23646 -masters=localhost:9333 -filer=localhost:8888 + ``` + +2. Open your browser to: `http://localhost:23646` + +3. Click the "S3 Buckets" button in the dashboard toolbar + +4. Or navigate directly to: `http://localhost:23646/s3/buckets` + +### Creating a New Bucket + +1. Click the "Create Bucket" button +2. Enter a valid bucket name (3-63 characters, lowercase letters, numbers, dots, hyphens) +3. Select a region (defaults to us-east-1) +4. Click "Create Bucket" + +### Deleting a Bucket + +1. Click the trash icon next to the bucket name +2. Confirm the deletion in the modal dialog +3. **Warning**: This permanently deletes the bucket and all its contents + +### Viewing Bucket Details + +1. Click on a bucket name to view detailed information +2. See all objects within the bucket +3. View object metadata (size, last modified, etc.) + +## API Endpoints + +The S3 bucket management feature exposes REST API endpoints: + +### List Buckets +``` +GET /api/s3/buckets +``` +Returns JSON array of all buckets with metadata. + +### Create Bucket +``` +POST /api/s3/buckets +Content-Type: application/json + +{ + "name": "my-bucket-name", + "region": "us-east-1" +} +``` + +### Delete Bucket +``` +DELETE /api/s3/buckets/{bucket-name} +``` +Permanently deletes the bucket and all contents. + +### Get Bucket Details +``` +GET /api/s3/buckets/{bucket-name} +``` +Returns detailed bucket information including object listings. 
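
For programmatic access, the endpoints above can be exercised with a small Go client. The sketch below is illustrative only: it assumes the admin server from this patch is running locally on its default port (23646) without authentication, that the list endpoint returns a JSON array of buckets as documented, and that the field names match the JSON tags of the `S3Bucket` struct in `dash/admin_server.go`.

```go
// Minimal sketch of driving the bucket API from Go (assumptions noted above).
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

const adminBase = "http://localhost:23646" // default admin port; adjust if -port was changed

// bucketInfo mirrors a subset of the S3Bucket JSON tags from dash/admin_server.go.
type bucketInfo struct {
	Name        string `json:"name"`
	Size        int64  `json:"size"`
	ObjectCount int64  `json:"object_count"`
	Status      string `json:"status"`
}

func main() {
	// Create a bucket via POST /api/s3/buckets.
	payload, _ := json.Marshal(map[string]string{
		"name":   "my-bucket-name",
		"region": "us-east-1",
	})
	resp, err := http.Post(adminBase+"/api/s3/buckets", "application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println("create bucket:", resp.Status)

	// List buckets via GET /api/s3/buckets.
	resp, err = http.Get(adminBase + "/api/s3/buckets")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Assumes the documented "JSON array of all buckets" response shape.
	var buckets []bucketInfo
	if err := json.NewDecoder(resp.Body).Decode(&buckets); err != nil {
		log.Fatal(err)
	}
	for _, b := range buckets {
		fmt.Printf("%s\t%d objects\t%d bytes\t%s\n", b.Name, b.ObjectCount, b.Size, b.Status)
	}
}
```

Deleting a bucket follows the same pattern with an `http.NewRequest("DELETE", adminBase+"/api/s3/buckets/"+name, nil)` call; as noted above, deletion is permanent and removes all bucket contents.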
+ +## Technical Implementation + +### Backend Integration +- **Filer Integration**: Uses SeaweedFS filer for bucket storage at `/buckets/` +- **Streaming API**: Efficiently handles large bucket listings +- **Error Handling**: Comprehensive error reporting and recovery + +### Frontend Features +- **Bootstrap UI**: Modern, responsive web interface +- **Real-time Updates**: Automatic refresh after operations +- **Form Validation**: Client-side bucket name validation +- **Modal Dialogs**: User-friendly create/delete workflows + +### Security Considerations +- **Confirmation Dialogs**: Prevent accidental deletions +- **Input Validation**: Prevent invalid bucket names +- **Error Messages**: Clear feedback for failed operations + +## Bucket Naming Rules + +S3 bucket names must follow these rules: +- 3-63 characters in length +- Contain only lowercase letters, numbers, dots (.), and hyphens (-) +- Start and end with a lowercase letter or number +- Cannot contain spaces or special characters +- Cannot be formatted as an IP address + +## Storage Structure + +Buckets are stored in the SeaweedFS filer at: +``` +/buckets/{bucket-name}/ +``` + +Each bucket directory contains: +- Object files with their original names +- Nested directories for object key prefixes +- Metadata preserved from S3 operations + +## Performance Notes + +- **Lazy Loading**: Bucket sizes and object counts are calculated on-demand +- **Streaming**: Large bucket listings use streaming responses +- **Caching**: Cluster topology data is cached for performance +- **Pagination**: Large object lists are handled efficiently + +## Troubleshooting + +### Common Issues + +1. **Bucket Creation Fails** + - Check bucket name follows S3 naming rules + - Ensure filer is accessible and running + - Verify sufficient storage space + +2. **Bucket Deletion Fails** + - Ensure bucket exists and is accessible + - Check for permission issues + - Verify filer connectivity + +3. **Bucket List Empty** + - Verify filer has `/buckets/` directory + - Check filer connectivity + - Ensure buckets were created through S3 API + +### Debug Steps + +1. Check admin server logs for error messages +2. Verify filer is running and accessible +3. Test filer connectivity: `curl http://localhost:8888/` +4. 
Check browser console for JavaScript errors + +## Future Enhancements + +- **Bucket Policies**: Manage access control policies +- **Lifecycle Rules**: Configure object lifecycle management +- **Versioning**: Enable/disable bucket versioning +- **Replication**: Configure cross-region replication +- **Metrics**: Detailed usage and performance metrics +- **Notifications**: Bucket event notifications +- **Search**: Search and filter bucket contents \ No newline at end of file diff --git a/weed/admin/admin.go b/weed/admin/admin.go new file mode 100644 index 000000000..dbda0c402 --- /dev/null +++ b/weed/admin/admin.go @@ -0,0 +1,247 @@ +package main + +import ( + "context" + "crypto/tls" + "embed" + "flag" + "fmt" + "io/fs" + "log" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/gin-contrib/sessions" + "github.com/gin-contrib/sessions/cookie" + "github.com/gin-gonic/gin" + + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +//go:embed static/* view/* +var adminFS embed.FS + +func main() { + var ( + port = flag.Int("port", 23646, "Port to run the admin server on") + host = flag.String("host", "localhost", "Host to bind the admin server to") + sessionKey = flag.String("sessionKey", "", "Session encryption key (32 bytes, random if not provided)") + tlsCert = flag.String("tlsCert", "", "Path to TLS certificate file") + tlsKey = flag.String("tlsKey", "", "Path to TLS key file") + master = flag.String("master", "localhost:9333", "SeaweedFS master server address") + authRequired = flag.Bool("auth", false, "Enable authentication") + username = flag.String("username", "admin", "Admin username (only used if auth is enabled)") + password = flag.String("password", "", "Admin password (only used if auth is enabled)") + help = flag.Bool("help", false, "Show help") + ) + + flag.Parse() + + if *help { + fmt.Println("SeaweedFS Admin Server") + fmt.Println() + flag.PrintDefaults() + return + } + + // Set Gin mode + gin.SetMode(gin.ReleaseMode) + + // Create router + r := gin.New() + r.Use(gin.Logger(), gin.Recovery()) + + // Session store + var sessionKeyBytes []byte + if *sessionKey != "" { + sessionKeyBytes = []byte(*sessionKey) + } else { + // Generate a random session key + sessionKeyBytes = make([]byte, 32) + for i := range sessionKeyBytes { + sessionKeyBytes[i] = byte(time.Now().UnixNano() & 0xff) + } + } + store := cookie.NewStore(sessionKeyBytes) + r.Use(sessions.Sessions("admin-session", store)) + + // Static files + staticFS, err := fs.Sub(adminFS, "static") + if err != nil { + log.Fatal("Failed to create static filesystem:", err) + } + r.StaticFS("/static", http.FS(staticFS)) + + // Templates + viewFS, err := fs.Sub(adminFS, "view") + if err != nil { + log.Fatal("Failed to create view filesystem:", err) + } + + // Create admin server + adminServer := dash.NewAdminServer(*master, http.FS(viewFS)) + + // Setup routes + setupRoutes(r, adminServer, *authRequired, *username, *password) + + // Server configuration + addr := fmt.Sprintf("%s:%d", *host, *port) + server := &http.Server{ + Addr: addr, + Handler: r, + } + + // TLS configuration + if *tlsCert != "" && *tlsKey != "" { + server.TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + } + + // Start server + go func() { + log.Printf("Starting SeaweedFS Admin Server on %s", addr) + + var err error + if *tlsCert != "" && *tlsKey != "" { + log.Printf("Using TLS with cert: %s, key: %s", *tlsCert, *tlsKey) + err = server.ListenAndServeTLS(*tlsCert, *tlsKey) + } else { + err = server.ListenAndServe() + } + + if err != nil 
&& err != http.ErrServerClosed { + log.Fatal("Failed to start server:", err) + } + }() + + // Wait for interrupt signal to gracefully shutdown the server + quit := make(chan os.Signal, 1) + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + <-quit + log.Println("Shutting down admin server...") + + // Give outstanding requests 30 seconds to complete + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if err := server.Shutdown(ctx); err != nil { + log.Fatal("Admin server forced to shutdown:", err) + } + + log.Println("Admin server exited") +} + +func setupRoutes(r *gin.Engine, adminServer *dash.AdminServer, authRequired bool, username, password string) { + // Health check (no auth required) + r.GET("/health", func(c *gin.Context) { + c.JSON(200, gin.H{"status": "ok"}) + }) + + if authRequired { + // Auth routes + auth := r.Group("/") + auth.GET("/login", adminServer.ShowLogin) + auth.POST("/login", adminServer.HandleLogin(username, password)) + auth.POST("/logout", adminServer.HandleLogout) + + // Protected routes + protected := r.Group("/") + protected.Use(dash.RequireAuth()) + + // Admin routes + protected.GET("/", adminServer.ShowAdmin) + protected.GET("/admin", adminServer.ShowAdmin) + protected.GET("/overview", adminServer.ShowAdmin) + + // Cluster management + cluster := protected.Group("/cluster") + { + cluster.GET("/topology", adminServer.GetClusterTopologyHandler) + cluster.GET("/masters", adminServer.GetMasters) + cluster.GET("/volumes", adminServer.GetVolumeServers) + cluster.POST("/volumes/assign", adminServer.AssignVolume) + } + + // Volume management + volumes := protected.Group("/volumes") + { + volumes.GET("/", adminServer.ListVolumes) + volumes.POST("/create", adminServer.CreateVolume) + volumes.DELETE("/:id", adminServer.DeleteVolume) + volumes.POST("/:id/replicate", adminServer.ReplicateVolume) + } + + // File browser + files := protected.Group("/filer") + { + files.GET("/*path", adminServer.BrowseFiles) + files.POST("/upload", adminServer.UploadFile) + files.DELETE("/*path", adminServer.DeleteFile) + } + + // Metrics + metrics := protected.Group("/metrics") + { + metrics.GET("/", adminServer.ShowMetrics) + metrics.GET("/data", adminServer.GetMetricsData) + } + + // Maintenance + maintenance := protected.Group("/maintenance") + { + maintenance.POST("/gc", adminServer.TriggerGC) + maintenance.POST("/compact", adminServer.CompactVolumes) + maintenance.GET("/status", adminServer.GetMaintenanceStatus) + } + } else { + // No auth required - all routes are public + r.GET("/", adminServer.ShowAdmin) + r.GET("/admin", adminServer.ShowAdmin) + r.GET("/overview", adminServer.ShowAdmin) + + // Cluster management + cluster := r.Group("/cluster") + { + cluster.GET("/topology", adminServer.GetClusterTopologyHandler) + cluster.GET("/masters", adminServer.GetMasters) + cluster.GET("/volumes", adminServer.GetVolumeServers) + cluster.POST("/volumes/assign", adminServer.AssignVolume) + } + + // Volume management + volumes := r.Group("/volumes") + { + volumes.GET("/", adminServer.ListVolumes) + volumes.POST("/create", adminServer.CreateVolume) + volumes.DELETE("/:id", adminServer.DeleteVolume) + volumes.POST("/:id/replicate", adminServer.ReplicateVolume) + } + + // File browser + files := r.Group("/filer") + { + files.GET("/*path", adminServer.BrowseFiles) + files.POST("/upload", adminServer.UploadFile) + files.DELETE("/*path", adminServer.DeleteFile) + } + + // Metrics + metrics := r.Group("/metrics") + { + metrics.GET("/", 
adminServer.ShowMetrics) + metrics.GET("/data", adminServer.GetMetricsData) + } + + // Maintenance + maintenance := r.Group("/maintenance") + { + maintenance.POST("/gc", adminServer.TriggerGC) + maintenance.POST("/compact", adminServer.CompactVolumes) + maintenance.GET("/status", adminServer.GetMaintenanceStatus) + } + } +} diff --git a/weed/admin/dash/admin_server.go b/weed/admin/dash/admin_server.go new file mode 100644 index 000000000..fe37f5bb7 --- /dev/null +++ b/weed/admin/dash/admin_server.go @@ -0,0 +1,1146 @@ +package dash + +import ( + "context" + "fmt" + "net/http" + "os" + "sort" + "strings" + "time" + + "github.com/seaweedfs/seaweedfs/weed/cluster" + "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/operation" + "github.com/seaweedfs/seaweedfs/weed/pb" + "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" + "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" + "github.com/seaweedfs/seaweedfs/weed/security" + "github.com/seaweedfs/seaweedfs/weed/util" + "google.golang.org/grpc" +) + +type AdminServer struct { + masterAddress string + templateFS http.FileSystem + grpcDialOption grpc.DialOption + cacheExpiration time.Duration + lastCacheUpdate time.Time + cachedTopology *ClusterTopology + + // Filer discovery and caching + cachedFilers []string + lastFilerUpdate time.Time + filerCacheExpiration time.Duration +} + +type ClusterTopology struct { + Masters []MasterNode `json:"masters"` + DataCenters []DataCenter `json:"datacenters"` + VolumeServers []VolumeServer `json:"volume_servers"` + TotalVolumes int `json:"total_volumes"` + TotalFiles int64 `json:"total_files"` + TotalSize int64 `json:"total_size"` + UpdatedAt time.Time `json:"updated_at"` +} + +type MasterNode struct { + Address string `json:"address"` + IsLeader bool `json:"is_leader"` + Status string `json:"status"` +} + +type DataCenter struct { + ID string `json:"id"` + Racks []Rack `json:"racks"` +} + +type Rack struct { + ID string `json:"id"` + Nodes []VolumeServer `json:"nodes"` +} + +type VolumeServer struct { + ID string `json:"id"` + Address string `json:"address"` + DataCenter string `json:"datacenter"` + Rack string `json:"rack"` + PublicURL string `json:"public_url"` + Volumes int `json:"volumes"` + MaxVolumes int `json:"max_volumes"` + DiskUsage int64 `json:"disk_usage"` + DiskCapacity int64 `json:"disk_capacity"` + LastHeartbeat time.Time `json:"last_heartbeat"` + Status string `json:"status"` +} + +// S3 Bucket management structures +type S3Bucket struct { + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + Size int64 `json:"size"` + ObjectCount int64 `json:"object_count"` + LastModified time.Time `json:"last_modified"` + Status string `json:"status"` +} + +type S3Object struct { + Key string `json:"key"` + Size int64 `json:"size"` + LastModified time.Time `json:"last_modified"` + ETag string `json:"etag"` + StorageClass string `json:"storage_class"` +} + +type BucketDetails struct { + Bucket S3Bucket `json:"bucket"` + Objects []S3Object `json:"objects"` + TotalSize int64 `json:"total_size"` + TotalCount int64 `json:"total_count"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Cluster management data structures +type ClusterVolumeServersData struct { + Username string `json:"username"` + VolumeServers []VolumeServer `json:"volume_servers"` + TotalVolumeServers int `json:"total_volume_servers"` + TotalVolumes int `json:"total_volumes"` + TotalCapacity int64 `json:"total_capacity"` + LastUpdated 
time.Time `json:"last_updated"` +} + +type VolumeInfo struct { + ID int `json:"id"` + Server string `json:"server"` + DataCenter string `json:"datacenter"` + Rack string `json:"rack"` + Collection string `json:"collection"` + Size int64 `json:"size"` + FileCount int64 `json:"file_count"` + Replication string `json:"replication"` + Status string `json:"status"` +} + +type ClusterVolumesData struct { + Username string `json:"username"` + Volumes []VolumeInfo `json:"volumes"` + TotalVolumes int `json:"total_volumes"` + TotalSize int64 `json:"total_size"` + LastUpdated time.Time `json:"last_updated"` + + // Pagination + CurrentPage int `json:"current_page"` + TotalPages int `json:"total_pages"` + PageSize int `json:"page_size"` + + // Sorting + SortBy string `json:"sort_by"` + SortOrder string `json:"sort_order"` +} + +type CollectionInfo struct { + Name string `json:"name"` + DataCenter string `json:"datacenter"` + Replication string `json:"replication"` + VolumeCount int `json:"volume_count"` + FileCount int64 `json:"file_count"` + TotalSize int64 `json:"total_size"` + TTL string `json:"ttl"` + DiskType string `json:"disk_type"` + Status string `json:"status"` +} + +type ClusterCollectionsData struct { + Username string `json:"username"` + Collections []CollectionInfo `json:"collections"` + TotalCollections int `json:"total_collections"` + TotalVolumes int `json:"total_volumes"` + TotalFiles int64 `json:"total_files"` + TotalSize int64 `json:"total_size"` + LastUpdated time.Time `json:"last_updated"` +} + +type MasterInfo struct { + Address string `json:"address"` + IsLeader bool `json:"is_leader"` + Status string `json:"status"` + Suffrage string `json:"suffrage"` +} + +type ClusterMastersData struct { + Username string `json:"username"` + Masters []MasterInfo `json:"masters"` + TotalMasters int `json:"total_masters"` + LeaderCount int `json:"leader_count"` + LastUpdated time.Time `json:"last_updated"` +} + +type FilerInfo struct { + Address string `json:"address"` + DataCenter string `json:"datacenter"` + Rack string `json:"rack"` + Version string `json:"version"` + CreatedAt time.Time `json:"created_at"` + Status string `json:"status"` +} + +type ClusterFilersData struct { + Username string `json:"username"` + Filers []FilerInfo `json:"filers"` + TotalFilers int `json:"total_filers"` + LastUpdated time.Time `json:"last_updated"` +} + +func NewAdminServer(masterAddress string, templateFS http.FileSystem) *AdminServer { + return &AdminServer{ + masterAddress: masterAddress, + templateFS: templateFS, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), + cacheExpiration: 10 * time.Second, + filerCacheExpiration: 30 * time.Second, // Cache filers for 30 seconds + } +} + +// GetFilerAddress returns a filer address, discovering from masters if needed +func (s *AdminServer) GetFilerAddress() string { + // Discover filers from masters + filers := s.getDiscoveredFilers() + if len(filers) > 0 { + return filers[0] // Return the first available filer + } + + return "" +} + +// getDiscoveredFilers returns cached filers or discovers them from masters +func (s *AdminServer) getDiscoveredFilers() []string { + // Check if cache is still valid + if time.Since(s.lastFilerUpdate) < s.filerCacheExpiration && len(s.cachedFilers) > 0 { + return s.cachedFilers + } + + // Discover filers from masters + var filers []string + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ + 
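
// A minimal sketch (not from this patch) of the time-based caching pattern used by the
// topology and filer caches above: results are reused until an expiration window passes,
// and stale data is preferred over an error when a refresh fails, mirroring
// getDiscoveredFilers. All names here are illustrative.

package main

import (
	"fmt"
	"time"
)

type cachedList struct {
	expiration time.Duration
	lastUpdate time.Time
	values     []string
}

// get returns cached values while they are fresh; otherwise it calls refresh.
// On refresh failure it falls back to the stale cache rather than returning nothing.
func (c *cachedList) get(refresh func() ([]string, error)) []string {
	if time.Since(c.lastUpdate) < c.expiration && len(c.values) > 0 {
		return c.values
	}
	fresh, err := refresh()
	if err != nil {
		return c.values // stale, but better than nothing
	}
	c.values = fresh
	c.lastUpdate = time.Now()
	return c.values
}

func main() {
	c := &cachedList{expiration: 30 * time.Second}
	fmt.Println(c.get(func() ([]string, error) {
		return []string{"filer-1:8888"}, nil // assumed filer address
	}))
}
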
ClientType: cluster.FilerType, + }) + if err != nil { + return err + } + + for _, node := range resp.ClusterNodes { + filers = append(filers, node.Address) + } + + return nil + }) + + if err != nil { + glog.Warningf("Failed to discover filers from master %s: %v", s.masterAddress, err) + // Return cached filers even if expired, better than nothing + return s.cachedFilers + } + + // Update cache + s.cachedFilers = filers + s.lastFilerUpdate = time.Now() + + return filers +} + +// WithMasterClient executes a function with a master client connection +func (s *AdminServer) WithMasterClient(f func(client master_pb.SeaweedClient) error) error { + masterAddr := pb.ServerAddress(s.masterAddress) + + return pb.WithMasterClient(false, masterAddr, s.grpcDialOption, false, func(client master_pb.SeaweedClient) error { + return f(client) + }) +} + +// WithFilerClient executes a function with a filer client connection +func (s *AdminServer) WithFilerClient(f func(client filer_pb.SeaweedFilerClient) error) error { + filerAddr := s.GetFilerAddress() + if filerAddr == "" { + return fmt.Errorf("no filer available") + } + + return pb.WithGrpcFilerClient(false, 0, pb.ServerAddress(filerAddr), s.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return f(client) + }) +} + +// WithVolumeServerClient executes a function with a volume server client connection +func (s *AdminServer) WithVolumeServerClient(address pb.ServerAddress, f func(client volume_server_pb.VolumeServerClient) error) error { + return operation.WithVolumeServerClient(false, address, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return f(client) + }) +} + +// GetClusterTopology returns the current cluster topology with caching +func (s *AdminServer) GetClusterTopology() (*ClusterTopology, error) { + now := time.Now() + if s.cachedTopology != nil && now.Sub(s.lastCacheUpdate) < s.cacheExpiration { + return s.cachedTopology, nil + } + + topology := &ClusterTopology{ + UpdatedAt: now, + } + + // Use gRPC only + err := s.getTopologyViaGRPC(topology) + if err != nil { + glog.Errorf("Failed to connect to master server %s: %v", s.masterAddress, err) + return nil, fmt.Errorf("gRPC topology request failed: %v", err) + } + + // Cache the result + s.cachedTopology = topology + s.lastCacheUpdate = now + + return topology, nil +} + +// getTopologyViaGRPC gets topology using gRPC (original method) +func (s *AdminServer) getTopologyViaGRPC(topology *ClusterTopology) error { + // Get cluster status from master + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + if err != nil { + glog.Errorf("Failed to get volume list from master %s: %v", s.masterAddress, err) + return err + } + + if resp.TopologyInfo != nil { + // Process gRPC response + for _, dc := range resp.TopologyInfo.DataCenterInfos { + dataCenter := DataCenter{ + ID: dc.Id, + Racks: []Rack{}, + } + + for _, rack := range dc.RackInfos { + rackObj := Rack{ + ID: rack.Id, + Nodes: []VolumeServer{}, + } + + for _, node := range rack.DataNodeInfos { + // Calculate totals from disk infos + var totalVolumes int64 + var totalMaxVolumes int64 + var totalSize int64 + var totalFiles int64 + + for _, diskInfo := range node.DiskInfos { + totalVolumes += diskInfo.VolumeCount + totalMaxVolumes += diskInfo.MaxVolumeCount + + // Sum up individual volume information + for _, volInfo := range diskInfo.VolumeInfos { + totalSize += int64(volInfo.Size) + totalFiles += 
int64(volInfo.FileCount) + } + } + + vs := VolumeServer{ + ID: node.Id, + Address: node.Id, + DataCenter: dc.Id, + Rack: rack.Id, + PublicURL: node.Id, + Volumes: int(totalVolumes), + MaxVolumes: int(totalMaxVolumes), + DiskUsage: totalSize, + DiskCapacity: totalMaxVolumes * int64(resp.VolumeSizeLimitMb) * 1024 * 1024, + LastHeartbeat: time.Now(), + Status: "active", + } + + rackObj.Nodes = append(rackObj.Nodes, vs) + topology.VolumeServers = append(topology.VolumeServers, vs) + topology.TotalVolumes += vs.Volumes + topology.TotalFiles += totalFiles + topology.TotalSize += totalSize + } + + dataCenter.Racks = append(dataCenter.Racks, rackObj) + } + + topology.DataCenters = append(topology.DataCenters, dataCenter) + } + } + + return nil + }) + + return err +} + +// InvalidateCache forces a refresh of cached data +func (s *AdminServer) InvalidateCache() { + s.lastCacheUpdate = time.Time{} + s.cachedTopology = nil + s.lastFilerUpdate = time.Time{} + s.cachedFilers = nil +} + +// GetS3Buckets retrieves all S3 buckets from the filer and collects size/object data from collections +func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) { + var buckets []S3Bucket + + // Build a map of collection name to collection data + collectionMap := make(map[string]struct { + Size int64 + FileCount int64 + }) + + // Collect volume information by collection + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + if err != nil { + return err + } + + if resp.TopologyInfo != nil { + for _, dc := range resp.TopologyInfo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, node := range rack.DataNodeInfos { + for _, diskInfo := range node.DiskInfos { + for _, volInfo := range diskInfo.VolumeInfos { + collection := volInfo.Collection + if collection == "" { + collection = "default" + } + + if _, exists := collectionMap[collection]; !exists { + collectionMap[collection] = struct { + Size int64 + FileCount int64 + }{} + } + + data := collectionMap[collection] + data.Size += int64(volInfo.Size) + data.FileCount += int64(volInfo.FileCount) + collectionMap[collection] = data + } + } + } + } + } + } + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to get volume information: %v", err) + } + + // Get filer configuration to determine FilerGroup + var filerGroup string + err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + configResp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + glog.Warningf("Failed to get filer configuration: %v", err) + // Continue without filer group + return nil + } + filerGroup = configResp.FilerGroup + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to get filer configuration: %v", err) + } + + // Now list buckets from the filer and match with collection data + err = s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + // List buckets by looking at the /buckets directory + stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ + Directory: "/buckets", + Prefix: "", + StartFromFileName: "", + InclusiveStartFrom: false, + Limit: 1000, + }) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + return err + } + + if resp.Entry.IsDirectory { + bucketName := resp.Entry.Name + + // Determine collection name for this bucket + var 
collectionName string + if filerGroup != "" { + collectionName = fmt.Sprintf("%s_%s", filerGroup, bucketName) + } else { + collectionName = bucketName + } + + // Get size and object count from collection data + var size int64 + var objectCount int64 + if collectionData, exists := collectionMap[collectionName]; exists { + size = collectionData.Size + objectCount = collectionData.FileCount + } + + bucket := S3Bucket{ + Name: bucketName, + CreatedAt: time.Unix(resp.Entry.Attributes.Crtime, 0), + Size: size, + ObjectCount: objectCount, + LastModified: time.Unix(resp.Entry.Attributes.Mtime, 0), + Status: "active", + } + buckets = append(buckets, bucket) + } + } + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to list S3 buckets: %v", err) + } + + return buckets, nil +} + +// GetBucketDetails retrieves detailed information about a specific bucket +func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error) { + bucketPath := fmt.Sprintf("/buckets/%s", bucketName) + + details := &BucketDetails{ + Bucket: S3Bucket{ + Name: bucketName, + Status: "active", + }, + Objects: []S3Object{}, + UpdatedAt: time.Now(), + } + + err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + // Get bucket info + bucketResp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ + Directory: "/buckets", + Name: bucketName, + }) + if err != nil { + return fmt.Errorf("bucket not found: %v", err) + } + + details.Bucket.CreatedAt = time.Unix(bucketResp.Entry.Attributes.Crtime, 0) + details.Bucket.LastModified = time.Unix(bucketResp.Entry.Attributes.Mtime, 0) + + // List objects in bucket (recursively) + return s.listBucketObjects(client, bucketPath, "", details) + }) + + if err != nil { + return nil, err + } + + return details, nil +} + +// listBucketObjects recursively lists all objects in a bucket +func (s *AdminServer) listBucketObjects(client filer_pb.SeaweedFilerClient, directory, prefix string, details *BucketDetails) error { + stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ + Directory: directory, + Prefix: prefix, + StartFromFileName: "", + InclusiveStartFrom: false, + Limit: 1000, + }) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + return err + } + + entry := resp.Entry + if entry.IsDirectory { + // Recursively list subdirectories + subDir := fmt.Sprintf("%s/%s", directory, entry.Name) + err := s.listBucketObjects(client, subDir, "", details) + if err != nil { + return err + } + } else { + // Add file object + objectKey := entry.Name + if directory != fmt.Sprintf("/buckets/%s", details.Bucket.Name) { + // Remove bucket prefix to get relative path + relativePath := directory[len(fmt.Sprintf("/buckets/%s", details.Bucket.Name))+1:] + objectKey = fmt.Sprintf("%s/%s", relativePath, entry.Name) + } + + obj := S3Object{ + Key: objectKey, + Size: int64(entry.Attributes.FileSize), + LastModified: time.Unix(entry.Attributes.Mtime, 0), + ETag: "", // Could be calculated from chunks if needed + StorageClass: "STANDARD", + } + + details.Objects = append(details.Objects, obj) + details.TotalSize += obj.Size + details.TotalCount++ + } + } + + // Update bucket totals + details.Bucket.Size = details.TotalSize + details.Bucket.ObjectCount = details.TotalCount + + return nil +} + +// CreateS3Bucket creates a new S3 bucket +func (s *AdminServer) CreateS3Bucket(bucketName string) error { + return 
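
// Small illustrative helper (not from this patch) capturing the bucket-to-collection
// naming used in GetS3Buckets above: with a filer group configured, a bucket's
// collection is named "<filerGroup>_<bucket>", otherwise just the bucket name.
// The helper name and example values are assumptions.

package main

import "fmt"

func bucketCollectionName(filerGroup, bucketName string) string {
	if filerGroup != "" {
		return filerGroup + "_" + bucketName
	}
	return bucketName
}

func main() {
	fmt.Println(bucketCollectionName("", "photos"))   // photos
	fmt.Println(bucketCollectionName("g1", "photos")) // g1_photos
}
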
s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + // First ensure /buckets directory exists + _, err := client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{ + Directory: "/", + Entry: &filer_pb.Entry{ + Name: "buckets", + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + FileMode: uint32(0755 | os.ModeDir), // Directory mode + Uid: uint32(1000), + Gid: uint32(1000), + Crtime: time.Now().Unix(), + Mtime: time.Now().Unix(), + TtlSec: 0, + }, + }, + }) + // Ignore error if directory already exists + if err != nil && !strings.Contains(err.Error(), "already exists") && !strings.Contains(err.Error(), "existing entry") { + return fmt.Errorf("failed to create /buckets directory: %v", err) + } + + // Check if bucket already exists + _, err = client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ + Directory: "/buckets", + Name: bucketName, + }) + if err == nil { + return fmt.Errorf("bucket %s already exists", bucketName) + } + + // Create bucket directory under /buckets + _, err = client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{ + Directory: "/buckets", + Entry: &filer_pb.Entry{ + Name: bucketName, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + FileMode: uint32(0755 | os.ModeDir), // Directory mode + Uid: uint32(1000), + Gid: uint32(1000), + Crtime: time.Now().Unix(), + Mtime: time.Now().Unix(), + TtlSec: 0, + }, + }, + }) + if err != nil { + return fmt.Errorf("failed to create bucket directory: %v", err) + } + + return nil + }) +} + +// DeleteS3Bucket deletes an S3 bucket and all its contents +func (s *AdminServer) DeleteS3Bucket(bucketName string) error { + return s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + // Delete bucket directory recursively + _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ + Directory: "/buckets", + Name: bucketName, + IsDeleteData: true, + IsRecursive: true, + IgnoreRecursiveError: false, + }) + if err != nil { + return fmt.Errorf("failed to delete bucket: %v", err) + } + + return nil + }) +} + +// GetObjectStoreUsers retrieves object store users data +func (s *AdminServer) GetObjectStoreUsers() ([]ObjectStoreUser, error) { + // For now, return mock data since SeaweedFS doesn't have built-in user management + // In a real implementation, this would query the IAM system or user database + users := []ObjectStoreUser{ + { + Username: "admin", + Email: "admin@example.com", + AccessKey: "AKIAIOSFODNN7EXAMPLE", + SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + Status: "active", + CreatedAt: time.Now().AddDate(0, -1, 0), + LastLogin: time.Now().AddDate(0, 0, -1), + Permissions: []string{"s3:*", "iam:*"}, + }, + { + Username: "readonly", + Email: "readonly@example.com", + AccessKey: "AKIAI44QH8DHBEXAMPLE", + SecretKey: "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY", + Status: "active", + CreatedAt: time.Now().AddDate(0, -2, 0), + LastLogin: time.Now().AddDate(0, 0, -3), + Permissions: []string{"s3:GetObject", "s3:ListBucket"}, + }, + { + Username: "backup", + Email: "backup@example.com", + AccessKey: "AKIAIGCEVSQ6C2EXAMPLE", + SecretKey: "BnL1dIqRF/+WoWcouZ5e3qthJhEXAMPLEKEY", + Status: "inactive", + CreatedAt: time.Now().AddDate(0, -3, 0), + LastLogin: time.Now().AddDate(0, -1, -15), + Permissions: []string{"s3:PutObject", "s3:GetObject"}, + }, + } + + return users, nil +} + +// GetClusterVolumeServers retrieves cluster volume servers data +func (s *AdminServer) GetClusterVolumeServers() 
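
// Illustrative sketch (not from this patch): driving bucket management through the
// AdminServer methods defined above (CreateS3Bucket, GetS3Buckets, DeleteS3Bucket).
// The master address, bucket name, and passing a nil template filesystem for
// non-UI use are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/admin/dash"
)

func main() {
	// nil templateFS: assumption; only the gRPC-backed methods are exercised here.
	admin := dash.NewAdminServer("localhost:9333", nil)

	if err := admin.CreateS3Bucket("example-bucket"); err != nil {
		log.Fatal(err)
	}

	buckets, err := admin.GetS3Buckets()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range buckets {
		fmt.Printf("%s\t%d objects\t%d bytes\n", b.Name, b.ObjectCount, b.Size)
	}

	if err := admin.DeleteS3Bucket("example-bucket"); err != nil {
		log.Fatal(err)
	}
}
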
(*ClusterVolumeServersData, error) { + topology, err := s.GetClusterTopology() + if err != nil { + return nil, err + } + + var totalCapacity int64 + var totalVolumes int + for _, vs := range topology.VolumeServers { + totalCapacity += vs.DiskCapacity + totalVolumes += vs.Volumes + } + + return &ClusterVolumeServersData{ + VolumeServers: topology.VolumeServers, + TotalVolumeServers: len(topology.VolumeServers), + TotalVolumes: totalVolumes, + TotalCapacity: totalCapacity, + LastUpdated: time.Now(), + }, nil +} + +// GetClusterVolumes retrieves cluster volumes data with pagination and sorting +func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, sortOrder string) (*ClusterVolumesData, error) { + // Set defaults + if page < 1 { + page = 1 + } + if pageSize < 1 || pageSize > 1000 { + pageSize = 100 + } + if sortBy == "" { + sortBy = "id" + } + if sortOrder == "" { + sortOrder = "asc" + } + var volumes []VolumeInfo + var totalSize int64 + volumeID := 1 + + // Get detailed volume information via gRPC + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + if err != nil { + return err + } + + if resp.TopologyInfo != nil { + for _, dc := range resp.TopologyInfo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, node := range rack.DataNodeInfos { + for _, diskInfo := range node.DiskInfos { + for _, volInfo := range diskInfo.VolumeInfos { + // Extract collection name from volume info + collectionName := volInfo.Collection + if collectionName == "" { + collectionName = "default" // Default collection for volumes without explicit collection + } + + volume := VolumeInfo{ + ID: volumeID, + Server: node.Id, + DataCenter: dc.Id, + Rack: rack.Id, + Collection: collectionName, + Size: int64(volInfo.Size), + FileCount: int64(volInfo.FileCount), + Replication: fmt.Sprintf("%03d", volInfo.ReplicaPlacement), + Status: "active", + } + volumes = append(volumes, volume) + totalSize += volume.Size + volumeID++ + } + } + } + } + } + } + + return nil + }) + + if err != nil { + return nil, err + } + + // Sort volumes + s.sortVolumes(volumes, sortBy, sortOrder) + + // Calculate pagination + totalVolumes := len(volumes) + totalPages := (totalVolumes + pageSize - 1) / pageSize + if totalPages == 0 { + totalPages = 1 + } + + // Apply pagination + startIndex := (page - 1) * pageSize + endIndex := startIndex + pageSize + if startIndex >= totalVolumes { + volumes = []VolumeInfo{} + } else { + if endIndex > totalVolumes { + endIndex = totalVolumes + } + volumes = volumes[startIndex:endIndex] + } + + return &ClusterVolumesData{ + Volumes: volumes, + TotalVolumes: totalVolumes, + TotalSize: totalSize, + LastUpdated: time.Now(), + CurrentPage: page, + TotalPages: totalPages, + PageSize: pageSize, + SortBy: sortBy, + SortOrder: sortOrder, + }, nil +} + +// sortVolumes sorts the volumes slice based on the specified field and order +func (s *AdminServer) sortVolumes(volumes []VolumeInfo, sortBy string, sortOrder string) { + sort.Slice(volumes, func(i, j int) bool { + var less bool + + switch sortBy { + case "id": + less = volumes[i].ID < volumes[j].ID + case "server": + less = volumes[i].Server < volumes[j].Server + case "datacenter": + less = volumes[i].DataCenter < volumes[j].DataCenter + case "rack": + less = volumes[i].Rack < volumes[j].Rack + case "collection": + less = volumes[i].Collection < volumes[j].Collection + case "size": + less = volumes[i].Size < volumes[j].Size + case 
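
// Illustrative sketch (not from this patch): walking every volume page by page with
// GetClusterVolumes above, relying on its defaults and clamping (page >= 1, pageSize
// capped at 1000, default sort by id ascending). Master address and nil templateFS
// are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/seaweedfs/seaweedfs/weed/admin/dash"
)

func main() {
	admin := dash.NewAdminServer("localhost:9333", nil)

	var all []dash.VolumeInfo
	for page := 1; ; page++ {
		data, err := admin.GetClusterVolumes(page, 100, "size", "desc")
		if err != nil {
			log.Fatal(err)
		}
		all = append(all, data.Volumes...)
		if page >= data.TotalPages {
			break
		}
	}
	fmt.Println("volumes:", len(all))
}
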
"filecount": + less = volumes[i].FileCount < volumes[j].FileCount + case "replication": + less = volumes[i].Replication < volumes[j].Replication + case "status": + less = volumes[i].Status < volumes[j].Status + default: + less = volumes[i].ID < volumes[j].ID + } + + if sortOrder == "desc" { + return !less + } + return less + }) +} + +// GetClusterCollections retrieves cluster collections data +func (s *AdminServer) GetClusterCollections() (*ClusterCollectionsData, error) { + var collections []CollectionInfo + var totalVolumes int + var totalFiles int64 + var totalSize int64 + collectionMap := make(map[string]*CollectionInfo) + + // Get actual collection information from volume data + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + if err != nil { + return err + } + + if resp.TopologyInfo != nil { + for _, dc := range resp.TopologyInfo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, node := range rack.DataNodeInfos { + for _, diskInfo := range node.DiskInfos { + for _, volInfo := range diskInfo.VolumeInfos { + // Extract collection name from volume info + collectionName := volInfo.Collection + if collectionName == "" { + collectionName = "default" // Default collection for volumes without explicit collection + } + + // Get or create collection info + if collection, exists := collectionMap[collectionName]; exists { + collection.VolumeCount++ + collection.FileCount += int64(volInfo.FileCount) + collection.TotalSize += int64(volInfo.Size) + + // Update data center if this collection spans multiple DCs + if collection.DataCenter != dc.Id && collection.DataCenter != "multi" { + collection.DataCenter = "multi" + } + + totalVolumes++ + totalFiles += int64(volInfo.FileCount) + totalSize += int64(volInfo.Size) + } else { + // Format TTL properly + var ttlStr string + if volInfo.Ttl > 0 { + ttlStr = fmt.Sprintf("%ds", volInfo.Ttl) + } else { + ttlStr = "" + } + + newCollection := CollectionInfo{ + Name: collectionName, + DataCenter: dc.Id, + Replication: fmt.Sprintf("%03d", volInfo.ReplicaPlacement), + VolumeCount: 1, + FileCount: int64(volInfo.FileCount), + TotalSize: int64(volInfo.Size), + TTL: ttlStr, + DiskType: "hdd", // Default disk type + Status: "active", + } + collectionMap[collectionName] = &newCollection + totalVolumes++ + totalFiles += int64(volInfo.FileCount) + totalSize += int64(volInfo.Size) + } + } + } + } + } + } + } + + return nil + }) + + if err != nil { + return nil, err + } + + // Convert map to slice + for _, collection := range collectionMap { + collections = append(collections, *collection) + } + + // If no collections found, show a message indicating no collections exist + if len(collections) == 0 { + // Return empty collections data instead of creating fake ones + return &ClusterCollectionsData{ + Collections: []CollectionInfo{}, + TotalCollections: 0, + TotalVolumes: 0, + TotalFiles: 0, + TotalSize: 0, + LastUpdated: time.Now(), + }, nil + } + + return &ClusterCollectionsData{ + Collections: collections, + TotalCollections: len(collections), + TotalVolumes: totalVolumes, + TotalFiles: totalFiles, + TotalSize: totalSize, + LastUpdated: time.Now(), + }, nil +} + +// GetClusterMasters retrieves cluster masters data +func (s *AdminServer) GetClusterMasters() (*ClusterMastersData, error) { + var masters []MasterInfo + var leaderCount int + + // First, get master information from topology + topology, err := s.GetClusterTopology() + if err != nil { + 
return nil, err + } + + // Create a map to merge topology and raft data + masterMap := make(map[string]*MasterInfo) + + // Add masters from topology + for _, master := range topology.Masters { + masterInfo := &MasterInfo{ + Address: master.Address, + IsLeader: master.IsLeader, + Status: master.Status, + Suffrage: "", + } + + if master.IsLeader { + leaderCount++ + } + + masterMap[master.Address] = masterInfo + } + + // Then, get additional master information from Raft cluster + err = s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.RaftListClusterServers(context.Background(), &master_pb.RaftListClusterServersRequest{}) + if err != nil { + return err + } + + // Process each raft server + for _, server := range resp.ClusterServers { + address := server.Address + + // Update existing master info or create new one + if masterInfo, exists := masterMap[address]; exists { + // Update existing master with raft data + masterInfo.IsLeader = server.IsLeader + masterInfo.Suffrage = server.Suffrage + masterInfo.Status = "active" // If it's in raft cluster, it's active + } else { + // Create new master info from raft data + masterInfo := &MasterInfo{ + Address: address, + IsLeader: server.IsLeader, + Status: "active", + Suffrage: server.Suffrage, + } + masterMap[address] = masterInfo + } + + if server.IsLeader { + // Update leader count based on raft data + leaderCount = 1 // There should only be one leader + } + } + + return nil + }) + + if err != nil { + // If gRPC call fails, log the error but continue with topology data + glog.Errorf("Failed to get raft cluster servers from master %s: %v", s.masterAddress, err) + } + + // Convert map to slice + for _, masterInfo := range masterMap { + masters = append(masters, *masterInfo) + } + + // If no masters found at all, add the configured master as fallback + if len(masters) == 0 { + masters = append(masters, MasterInfo{ + Address: s.masterAddress, + IsLeader: true, + Status: "active", + Suffrage: "Voter", + }) + leaderCount = 1 + } + + return &ClusterMastersData{ + Masters: masters, + TotalMasters: len(masters), + LeaderCount: leaderCount, + LastUpdated: time.Now(), + }, nil +} + +// GetClusterFilers retrieves cluster filers data +func (s *AdminServer) GetClusterFilers() (*ClusterFilersData, error) { + var filers []FilerInfo + + // Get filer information from master using ListClusterNodes + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ + ClientType: cluster.FilerType, + }) + if err != nil { + return err + } + + // Process each filer node + for _, node := range resp.ClusterNodes { + createdAt := time.Unix(0, node.CreatedAtNs) + + filerInfo := FilerInfo{ + Address: node.Address, + DataCenter: node.DataCenter, + Rack: node.Rack, + Version: node.Version, + CreatedAt: createdAt, + Status: "active", // If it's in the cluster list, it's considered active + } + + filers = append(filers, filerInfo) + } + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to get filer nodes from master: %v", err) + } + + return &ClusterFilersData{ + Filers: filers, + TotalFilers: len(filers), + LastUpdated: time.Now(), + }, nil +} + +// GetAllFilers returns all discovered filers +func (s *AdminServer) GetAllFilers() []string { + return s.getDiscoveredFilers() +} diff --git a/weed/admin/dash/file_browser.go b/weed/admin/dash/file_browser.go new file mode 100644 index 000000000..089c2eadc --- /dev/null +++ 
b/weed/admin/dash/file_browser.go @@ -0,0 +1,350 @@ +package dash + +import ( + "context" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" +) + +// FileEntry represents a file or directory entry in the file browser +type FileEntry struct { + Name string `json:"name"` + FullPath string `json:"full_path"` + IsDirectory bool `json:"is_directory"` + Size int64 `json:"size"` + ModTime time.Time `json:"mod_time"` + Mode string `json:"mode"` + Uid uint32 `json:"uid"` + Gid uint32 `json:"gid"` + Mime string `json:"mime"` + Replication string `json:"replication"` + Collection string `json:"collection"` + TtlSec int32 `json:"ttl_sec"` +} + +// BreadcrumbItem represents a single breadcrumb in the navigation +type BreadcrumbItem struct { + Name string `json:"name"` + Path string `json:"path"` +} + +// FileBrowserData contains all data needed for the file browser view +type FileBrowserData struct { + Username string `json:"username"` + CurrentPath string `json:"current_path"` + ParentPath string `json:"parent_path"` + Breadcrumbs []BreadcrumbItem `json:"breadcrumbs"` + Entries []FileEntry `json:"entries"` + TotalEntries int `json:"total_entries"` + TotalSize int64 `json:"total_size"` + LastUpdated time.Time `json:"last_updated"` + IsBucketPath bool `json:"is_bucket_path"` + BucketName string `json:"bucket_name"` +} + +// GetFileBrowser retrieves file browser data for a given path +func (s *AdminServer) GetFileBrowser(path string) (*FileBrowserData, error) { + if path == "" { + path = "/" + } + + var entries []FileEntry + var totalSize int64 + + // Get directory listing from filer + err := s.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ + Directory: path, + Prefix: "", + Limit: 1000, + InclusiveStartFrom: false, + }) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + return err + } + + entry := resp.Entry + if entry == nil { + continue + } + + fullPath := path + if !strings.HasSuffix(fullPath, "/") { + fullPath += "/" + } + fullPath += entry.Name + + var modTime time.Time + if entry.Attributes != nil && entry.Attributes.Mtime > 0 { + modTime = time.Unix(entry.Attributes.Mtime, 0) + } + + var mode string + var uid, gid uint32 + var size int64 + var replication, collection string + var ttlSec int32 + + if entry.Attributes != nil { + mode = formatFileMode(entry.Attributes.FileMode) + uid = entry.Attributes.Uid + gid = entry.Attributes.Gid + size = int64(entry.Attributes.FileSize) + ttlSec = entry.Attributes.TtlSec + } + + // Get replication and collection from entry extended attributes or chunks + if entry.Extended != nil { + if repl, ok := entry.Extended["replication"]; ok { + replication = string(repl) + } + if coll, ok := entry.Extended["collection"]; ok { + collection = string(coll) + } + } + + // Determine MIME type based on file extension + mime := "application/octet-stream" + if entry.IsDirectory { + mime = "inode/directory" + } else { + ext := strings.ToLower(filepath.Ext(entry.Name)) + switch ext { + case ".txt", ".log": + mime = "text/plain" + case ".html", ".htm": + mime = "text/html" + case ".css": + mime = "text/css" + case ".js": + mime = "application/javascript" + case ".json": + mime = "application/json" + case ".xml": + mime = "application/xml" + case ".pdf": + mime = "application/pdf" + case ".jpg", ".jpeg": + mime = "image/jpeg" + case ".png": + 
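
// Aside (not from this patch): the standard library can also map extensions to MIME
// types. This sketch expresses the extension switch above with mime.TypeByExtension;
// note that it may return values with parameters (e.g. "text/plain; charset=utf-8"),
// so the output is not byte-for-byte identical to the switch.

package main

import (
	"fmt"
	"mime"
	"path/filepath"
	"strings"
)

// detectMime is an illustrative alternative, not the patch's implementation.
func detectMime(name string, isDirectory bool) string {
	if isDirectory {
		return "inode/directory"
	}
	if t := mime.TypeByExtension(strings.ToLower(filepath.Ext(name))); t != "" {
		return t
	}
	return "application/octet-stream"
}

func main() {
	fmt.Println(detectMime("photo.PNG", false)) // image/png
	fmt.Println(detectMime("notes", false))     // application/octet-stream
}
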
mime = "image/png" + case ".gif": + mime = "image/gif" + case ".svg": + mime = "image/svg+xml" + case ".mp4": + mime = "video/mp4" + case ".mp3": + mime = "audio/mpeg" + case ".zip": + mime = "application/zip" + case ".tar": + mime = "application/x-tar" + case ".gz": + mime = "application/gzip" + } + } + + fileEntry := FileEntry{ + Name: entry.Name, + FullPath: fullPath, + IsDirectory: entry.IsDirectory, + Size: size, + ModTime: modTime, + Mode: mode, + Uid: uid, + Gid: gid, + Mime: mime, + Replication: replication, + Collection: collection, + TtlSec: ttlSec, + } + + entries = append(entries, fileEntry) + if !entry.IsDirectory { + totalSize += size + } + } + + return nil + }) + + if err != nil { + return nil, err + } + + // Sort entries: directories first, then files, both alphabetically + sort.Slice(entries, func(i, j int) bool { + if entries[i].IsDirectory != entries[j].IsDirectory { + return entries[i].IsDirectory + } + return strings.ToLower(entries[i].Name) < strings.ToLower(entries[j].Name) + }) + + // Generate breadcrumbs + breadcrumbs := s.generateBreadcrumbs(path) + + // Calculate parent path + parentPath := "/" + if path != "/" { + parentPath = filepath.Dir(path) + if parentPath == "." { + parentPath = "/" + } + } + + // Check if this is a bucket path + isBucketPath := false + bucketName := "" + if strings.HasPrefix(path, "/buckets/") { + isBucketPath = true + pathParts := strings.Split(strings.Trim(path, "/"), "/") + if len(pathParts) >= 2 { + bucketName = pathParts[1] + } + } + + return &FileBrowserData{ + CurrentPath: path, + ParentPath: parentPath, + Breadcrumbs: breadcrumbs, + Entries: entries, + TotalEntries: len(entries), + TotalSize: totalSize, + LastUpdated: time.Now(), + IsBucketPath: isBucketPath, + BucketName: bucketName, + }, nil +} + +// generateBreadcrumbs creates breadcrumb navigation for the current path +func (s *AdminServer) generateBreadcrumbs(path string) []BreadcrumbItem { + var breadcrumbs []BreadcrumbItem + + // Always start with root + breadcrumbs = append(breadcrumbs, BreadcrumbItem{ + Name: "Root", + Path: "/", + }) + + if path == "/" { + return breadcrumbs + } + + // Split path and build breadcrumbs + parts := strings.Split(strings.Trim(path, "/"), "/") + currentPath := "" + + for _, part := range parts { + if part == "" { + continue + } + currentPath += "/" + part + + // Special handling for bucket paths + displayName := part + if len(breadcrumbs) == 1 && part == "buckets" { + displayName = "S3 Buckets" + } else if len(breadcrumbs) == 2 && strings.HasPrefix(path, "/buckets/") { + displayName = "📦 " + part // Add bucket icon to bucket name + } + + breadcrumbs = append(breadcrumbs, BreadcrumbItem{ + Name: displayName, + Path: currentPath, + }) + } + + return breadcrumbs +} + +// formatFileMode converts file mode to Unix-style string representation (e.g., "drwxr-xr-x") +func formatFileMode(mode uint32) string { + var result []byte = make([]byte, 10) + + // File type + switch mode & 0170000 { // S_IFMT mask + case 0040000: // S_IFDIR + result[0] = 'd' + case 0100000: // S_IFREG + result[0] = '-' + case 0120000: // S_IFLNK + result[0] = 'l' + case 0020000: // S_IFCHR + result[0] = 'c' + case 0060000: // S_IFBLK + result[0] = 'b' + case 0010000: // S_IFIFO + result[0] = 'p' + case 0140000: // S_IFSOCK + result[0] = 's' + default: + result[0] = '-' // S_IFREG is default + } + + // Owner permissions + if mode&0400 != 0 { // S_IRUSR + result[1] = 'r' + } else { + result[1] = '-' + } + if mode&0200 != 0 { // S_IWUSR + result[2] = 'w' + } else { + result[2] = 
'-' + } + if mode&0100 != 0 { // S_IXUSR + result[3] = 'x' + } else { + result[3] = '-' + } + + // Group permissions + if mode&0040 != 0 { // S_IRGRP + result[4] = 'r' + } else { + result[4] = '-' + } + if mode&0020 != 0 { // S_IWGRP + result[5] = 'w' + } else { + result[5] = '-' + } + if mode&0010 != 0 { // S_IXGRP + result[6] = 'x' + } else { + result[6] = '-' + } + + // Other permissions + if mode&0004 != 0 { // S_IROTH + result[7] = 'r' + } else { + result[7] = '-' + } + if mode&0002 != 0 { // S_IWOTH + result[8] = 'w' + } else { + result[8] = '-' + } + if mode&0001 != 0 { // S_IXOTH + result[9] = 'x' + } else { + result[9] = '-' + } + + return string(result) +} diff --git a/weed/admin/dash/handler_admin.go b/weed/admin/dash/handler_admin.go new file mode 100644 index 000000000..53eb54ec9 --- /dev/null +++ b/weed/admin/dash/handler_admin.go @@ -0,0 +1,373 @@ +package dash + +import ( + "context" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/seaweedfs/seaweedfs/weed/cluster" + "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" +) + +type AdminData struct { + Username string `json:"username"` + ClusterStatus string `json:"cluster_status"` + TotalVolumes int `json:"total_volumes"` + TotalFiles int64 `json:"total_files"` + TotalSize int64 `json:"total_size"` + MasterNodes []MasterNode `json:"master_nodes"` + VolumeServers []VolumeServer `json:"volume_servers"` + FilerNodes []FilerNode `json:"filer_nodes"` + DataCenters []DataCenter `json:"datacenters"` + LastUpdated time.Time `json:"last_updated"` + SystemHealth string `json:"system_health"` +} + +// S3 Bucket management data structures for templates +type S3BucketsData struct { + Username string `json:"username"` + Buckets []S3Bucket `json:"buckets"` + TotalBuckets int `json:"total_buckets"` + TotalSize int64 `json:"total_size"` + LastUpdated time.Time `json:"last_updated"` +} + +type CreateBucketRequest struct { + Name string `json:"name" binding:"required"` + Region string `json:"region"` +} + +// Object Store Users management structures +type ObjectStoreUser struct { + Username string `json:"username"` + Email string `json:"email"` + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` + LastLogin time.Time `json:"last_login"` + Permissions []string `json:"permissions"` +} + +type ObjectStoreUsersData struct { + Username string `json:"username"` + Users []ObjectStoreUser `json:"users"` + TotalUsers int `json:"total_users"` + LastUpdated time.Time `json:"last_updated"` +} + +type FilerNode struct { + Address string `json:"address"` + DataCenter string `json:"datacenter"` + Rack string `json:"rack"` + Status string `json:"status"` + LastUpdated time.Time `json:"last_updated"` +} + +// GetAdminData retrieves admin data as a struct (for reuse by both JSON and HTML handlers) +func (s *AdminServer) GetAdminData(username string) (AdminData, error) { + if username == "" { + username = "admin" + } + + // Get cluster topology + topology, err := s.GetClusterTopology() + if err != nil { + glog.Errorf("Failed to get cluster topology: %v", err) + return AdminData{}, err + } + + // Get master nodes status + masterNodes := s.getMasterNodesStatus() + + // Get filer nodes status + filerNodes := s.getFilerNodesStatus() + + // Prepare admin data + adminData := AdminData{ + Username: username, + ClusterStatus: s.determineClusterStatus(topology, masterNodes), + TotalVolumes: topology.TotalVolumes, 
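
// Illustrative test sketch (not from this patch) for the formatFileMode helper above.
// The octal constants mirror the S_IFDIR/S_IFREG/S_IFLNK values used in its switch;
// this would live alongside the helper in package dash.

package dash

import "testing"

func TestFormatFileMode(t *testing.T) {
	cases := []struct {
		mode uint32
		want string
	}{
		{0040000 | 0755, "drwxr-xr-x"}, // directory, 0755
		{0100000 | 0644, "-rw-r--r--"}, // regular file, 0644
		{0120000 | 0777, "lrwxrwxrwx"}, // symlink, 0777
	}
	for _, c := range cases {
		if got := formatFileMode(c.mode); got != c.want {
			t.Errorf("formatFileMode(%o) = %q, want %q", c.mode, got, c.want)
		}
	}
}
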
+ TotalFiles: topology.TotalFiles, + TotalSize: topology.TotalSize, + MasterNodes: masterNodes, + VolumeServers: topology.VolumeServers, + FilerNodes: filerNodes, + DataCenters: topology.DataCenters, + LastUpdated: topology.UpdatedAt, + SystemHealth: s.determineSystemHealth(topology, masterNodes), + } + + return adminData, nil +} + +// ShowAdmin displays the main admin page (now uses GetAdminData) +func (s *AdminServer) ShowAdmin(c *gin.Context) { + username := c.GetString("username") + + adminData, err := s.GetAdminData(username) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get admin data: " + err.Error()}) + return + } + + // Return JSON for API calls + c.JSON(http.StatusOK, adminData) +} + +// ShowOverview displays cluster overview +func (s *AdminServer) ShowOverview(c *gin.Context) { + topology, err := s.GetClusterTopology() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, topology) +} + +// S3 Bucket Management Handlers + +// ShowS3Buckets displays the S3 buckets management page +func (s *AdminServer) ShowS3Buckets(c *gin.Context) { + username := c.GetString("username") + + buckets, err := s.GetS3Buckets() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get S3 buckets: " + err.Error()}) + return + } + + // Calculate totals + var totalSize int64 + for _, bucket := range buckets { + totalSize += bucket.Size + } + + data := S3BucketsData{ + Username: username, + Buckets: buckets, + TotalBuckets: len(buckets), + TotalSize: totalSize, + LastUpdated: time.Now(), + } + + c.JSON(http.StatusOK, data) +} + +// ShowBucketDetails displays detailed information about a specific bucket +func (s *AdminServer) ShowBucketDetails(c *gin.Context) { + bucketName := c.Param("bucket") + if bucketName == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name is required"}) + return + } + + details, err := s.GetBucketDetails(bucketName) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get bucket details: " + err.Error()}) + return + } + + c.JSON(http.StatusOK, details) +} + +// CreateBucket creates a new S3 bucket +func (s *AdminServer) CreateBucket(c *gin.Context) { + var req CreateBucketRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) + return + } + + // Validate bucket name (basic validation) + if len(req.Name) < 3 || len(req.Name) > 63 { + c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name must be between 3 and 63 characters"}) + return + } + + err := s.CreateS3Bucket(req.Name) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create bucket: " + err.Error()}) + return + } + + c.JSON(http.StatusCreated, gin.H{ + "message": "Bucket created successfully", + "bucket": req.Name, + }) +} + +// DeleteBucket deletes an S3 bucket +func (s *AdminServer) DeleteBucket(c *gin.Context) { + bucketName := c.Param("bucket") + if bucketName == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "Bucket name is required"}) + return + } + + err := s.DeleteS3Bucket(bucketName) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete bucket: " + err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{ + "message": "Bucket deleted successfully", + "bucket": bucketName, + }) +} + +// ListBucketsAPI returns buckets as JSON API +func (s *AdminServer) 
ListBucketsAPI(c *gin.Context) { + buckets, err := s.GetS3Buckets() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{ + "buckets": buckets, + "count": len(buckets), + }) +} + +// getMasterNodesStatus checks status of all master nodes +func (s *AdminServer) getMasterNodesStatus() []MasterNode { + var masterNodes []MasterNode + + // Since we have a single master address, create one entry + var isLeader bool = true // Assume leader since it's the only master we know about + var status string + + // Try to get leader info from this master + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + _, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return err + } + // For now, assume this master is the leader since we can connect to it + isLeader = true + return nil + }) + + if err != nil { + status = "unreachable" + isLeader = false + } else { + status = "active" + } + + masterNodes = append(masterNodes, MasterNode{ + Address: s.masterAddress, + IsLeader: isLeader, + Status: status, + }) + + return masterNodes +} + +// getFilerNodesStatus checks status of all filer nodes using master's ListClusterNodes +func (s *AdminServer) getFilerNodesStatus() []FilerNode { + var filerNodes []FilerNode + + // Get filer nodes from master using ListClusterNodes + err := s.WithMasterClient(func(client master_pb.SeaweedClient) error { + resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{ + ClientType: cluster.FilerType, + }) + if err != nil { + return err + } + + // Process each filer node + for _, node := range resp.ClusterNodes { + filerNodes = append(filerNodes, FilerNode{ + Address: node.Address, + DataCenter: node.DataCenter, + Rack: node.Rack, + Status: "active", // If it's in the cluster list, it's considered active + LastUpdated: time.Now(), + }) + } + + return nil + }) + + if err != nil { + glog.Errorf("Failed to get filer nodes from master %s: %v", s.masterAddress, err) + // Return empty list if we can't get filer info from master + return []FilerNode{} + } + + return filerNodes +} + +// determineClusterStatus analyzes cluster health +func (s *AdminServer) determineClusterStatus(topology *ClusterTopology, masters []MasterNode) string { + // Check if we have an active leader + hasActiveLeader := false + for _, master := range masters { + if master.IsLeader && master.Status == "active" { + hasActiveLeader = true + break + } + } + + if !hasActiveLeader { + return "critical" + } + + // Check volume server health + activeServers := 0 + for _, vs := range topology.VolumeServers { + if vs.Status == "active" { + activeServers++ + } + } + + if activeServers == 0 { + return "critical" + } else if activeServers < len(topology.VolumeServers) { + return "warning" + } + + return "healthy" +} + +// determineSystemHealth provides overall system health assessment +func (s *AdminServer) determineSystemHealth(topology *ClusterTopology, masters []MasterNode) string { + // Simple health calculation based on active components + totalComponents := len(masters) + len(topology.VolumeServers) + activeComponents := 0 + + for _, master := range masters { + if master.Status == "active" { + activeComponents++ + } + } + + for _, vs := range topology.VolumeServers { + if vs.Status == "active" { + activeComponents++ + } + } + + if totalComponents == 0 { + return "unknown" + } + + healthPercent := 
float64(activeComponents) / float64(totalComponents) * 100 + + if healthPercent >= 95 { + return "excellent" + } else if healthPercent >= 80 { + return "good" + } else if healthPercent >= 60 { + return "fair" + } else { + return "poor" + } +} diff --git a/weed/admin/dash/handler_auth.go b/weed/admin/dash/handler_auth.go new file mode 100644 index 000000000..c0b7d5636 --- /dev/null +++ b/weed/admin/dash/handler_auth.go @@ -0,0 +1,128 @@ +package dash + +import ( + "net/http" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + +// ShowLogin displays the login page +func (s *AdminServer) ShowLogin(c *gin.Context) { + // If authentication is not required, redirect to admin + session := sessions.Default(c) + if session.Get("authenticated") == true { + c.Redirect(http.StatusSeeOther, "/admin") + return + } + + // For now, return a simple login form as JSON + c.HTML(http.StatusOK, "login.html", gin.H{ + "title": "SeaweedFS Admin Login", + "error": c.Query("error"), + }) +} + +// HandleLogin handles login form submission +func (s *AdminServer) HandleLogin(username, password string) gin.HandlerFunc { + return func(c *gin.Context) { + loginUsername := c.PostForm("username") + loginPassword := c.PostForm("password") + + if loginUsername == username && loginPassword == password { + session := sessions.Default(c) + session.Set("authenticated", true) + session.Set("username", loginUsername) + session.Save() + + c.Redirect(http.StatusSeeOther, "/admin") + return + } + + // Authentication failed + c.Redirect(http.StatusSeeOther, "/login?error=Invalid credentials") + } +} + +// HandleLogout handles user logout +func (s *AdminServer) HandleLogout(c *gin.Context) { + session := sessions.Default(c) + session.Clear() + session.Save() + c.Redirect(http.StatusSeeOther, "/login") +} + +// Additional methods for admin functionality +func (s *AdminServer) GetClusterTopologyHandler(c *gin.Context) { + topology, err := s.GetClusterTopology() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, topology) +} + +func (s *AdminServer) GetMasters(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"masters": []string{s.masterAddress}}) +} + +func (s *AdminServer) GetVolumeServers(c *gin.Context) { + topology, err := s.GetClusterTopology() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, gin.H{"volume_servers": topology.VolumeServers}) +} + +func (s *AdminServer) AssignVolume(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Volume assignment not yet implemented"}) +} + +func (s *AdminServer) ListVolumes(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Volume listing not yet implemented"}) +} + +func (s *AdminServer) CreateVolume(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Volume creation not yet implemented"}) +} + +func (s *AdminServer) DeleteVolume(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Volume deletion not yet implemented"}) +} + +func (s *AdminServer) ReplicateVolume(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Volume replication not yet implemented"}) +} + +func (s *AdminServer) BrowseFiles(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "File browsing not yet implemented"}) +} + +func (s *AdminServer) UploadFile(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "File upload not yet 
implemented"}) +} + +func (s *AdminServer) DeleteFile(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "File deletion not yet implemented"}) +} + +func (s *AdminServer) ShowMetrics(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Metrics display not yet implemented"}) +} + +func (s *AdminServer) GetMetricsData(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Metrics data not yet implemented"}) +} + +func (s *AdminServer) TriggerGC(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Garbage collection not yet implemented"}) +} + +func (s *AdminServer) CompactVolumes(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Volume compaction not yet implemented"}) +} + +func (s *AdminServer) GetMaintenanceStatus(c *gin.Context) { + c.JSON(http.StatusNotImplemented, gin.H{"message": "Maintenance status not yet implemented"}) +} diff --git a/weed/admin/dash/middleware.go b/weed/admin/dash/middleware.go new file mode 100644 index 000000000..ce538d7ca --- /dev/null +++ b/weed/admin/dash/middleware.go @@ -0,0 +1,27 @@ +package dash + +import ( + "net/http" + + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + +// RequireAuth checks if user is authenticated +func RequireAuth() gin.HandlerFunc { + return func(c *gin.Context) { + session := sessions.Default(c) + authenticated := session.Get("authenticated") + username := session.Get("username") + + if authenticated != true || username == nil { + c.Redirect(http.StatusTemporaryRedirect, "/login") + c.Abort() + return + } + + // Set username in context for use in handlers + c.Set("username", username) + c.Next() + } +} diff --git a/weed/admin/handlers/auth.go b/weed/admin/handlers/auth.go new file mode 100644 index 000000000..07596b8e4 --- /dev/null +++ b/weed/admin/handlers/auth.go @@ -0,0 +1,45 @@ +package handlers + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" + "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" +) + +// AuthHandlers contains authentication-related HTTP handlers +type AuthHandlers struct { + adminServer *dash.AdminServer +} + +// NewAuthHandlers creates a new instance of AuthHandlers +func NewAuthHandlers(adminServer *dash.AdminServer) *AuthHandlers { + return &AuthHandlers{ + adminServer: adminServer, + } +} + +// ShowLogin displays the login page +func (a *AuthHandlers) ShowLogin(c *gin.Context) { + errorMessage := c.Query("error") + + // Render login template + c.Header("Content-Type", "text/html") + loginComponent := layout.LoginForm(c, "SeaweedFS Admin", errorMessage) + err := loginComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render login template: " + err.Error()}) + return + } +} + +// HandleLogin handles login form submission +func (a *AuthHandlers) HandleLogin(username, password string) gin.HandlerFunc { + return a.adminServer.HandleLogin(username, password) +} + +// HandleLogout handles user logout +func (a *AuthHandlers) HandleLogout(c *gin.Context) { + a.adminServer.HandleLogout(c) +} diff --git a/weed/admin/handlers/cluster_handlers.go b/weed/admin/handlers/cluster_handlers.go new file mode 100644 index 000000000..515cdaecb --- /dev/null +++ b/weed/admin/handlers/cluster_handlers.go @@ -0,0 +1,202 @@ +package handlers + +import ( + "net/http" + "strconv" + + "github.com/gin-gonic/gin" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" + 
"github.com/seaweedfs/seaweedfs/weed/admin/view/app" + "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" +) + +// ClusterHandlers contains all the HTTP handlers for cluster management +type ClusterHandlers struct { + adminServer *dash.AdminServer +} + +// NewClusterHandlers creates a new instance of ClusterHandlers +func NewClusterHandlers(adminServer *dash.AdminServer) *ClusterHandlers { + return &ClusterHandlers{ + adminServer: adminServer, + } +} + +// ShowClusterVolumeServers renders the cluster volume servers page +func (h *ClusterHandlers) ShowClusterVolumeServers(c *gin.Context) { + // Get cluster volume servers data + volumeServersData, err := h.adminServer.GetClusterVolumeServers() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster volume servers: " + err.Error()}) + return + } + + // Set username + username := c.GetString("username") + if username == "" { + username = "admin" + } + volumeServersData.Username = username + + // Render HTML template + c.Header("Content-Type", "text/html") + volumeServersComponent := app.ClusterVolumeServers(*volumeServersData) + layoutComponent := layout.Layout(c, volumeServersComponent) + err = layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// ShowClusterVolumes renders the cluster volumes page +func (h *ClusterHandlers) ShowClusterVolumes(c *gin.Context) { + // Get pagination and sorting parameters from query string + page := 1 + if p := c.Query("page"); p != "" { + if parsed, err := strconv.Atoi(p); err == nil && parsed > 0 { + page = parsed + } + } + + pageSize := 100 + if ps := c.Query("pageSize"); ps != "" { + if parsed, err := strconv.Atoi(ps); err == nil && parsed > 0 && parsed <= 1000 { + pageSize = parsed + } + } + + sortBy := c.DefaultQuery("sortBy", "id") + sortOrder := c.DefaultQuery("sortOrder", "asc") + + // Get cluster volumes data + volumesData, err := h.adminServer.GetClusterVolumes(page, pageSize, sortBy, sortOrder) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster volumes: " + err.Error()}) + return + } + + // Set username + username := c.GetString("username") + if username == "" { + username = "admin" + } + volumesData.Username = username + + // Render HTML template + c.Header("Content-Type", "text/html") + volumesComponent := app.ClusterVolumes(*volumesData) + layoutComponent := layout.Layout(c, volumesComponent) + err = layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// ShowClusterCollections renders the cluster collections page +func (h *ClusterHandlers) ShowClusterCollections(c *gin.Context) { + // Get cluster collections data + collectionsData, err := h.adminServer.GetClusterCollections() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster collections: " + err.Error()}) + return + } + + // Set username + username := c.GetString("username") + if username == "" { + username = "admin" + } + collectionsData.Username = username + + // Render HTML template + c.Header("Content-Type", "text/html") + collectionsComponent := app.ClusterCollections(*collectionsData) + layoutComponent := layout.Layout(c, collectionsComponent) + err = layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + 
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// ShowClusterMasters renders the cluster masters page +func (h *ClusterHandlers) ShowClusterMasters(c *gin.Context) { + // Get cluster masters data + mastersData, err := h.adminServer.GetClusterMasters() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster masters: " + err.Error()}) + return + } + + // Set username + username := c.GetString("username") + if username == "" { + username = "admin" + } + mastersData.Username = username + + // Render HTML template + c.Header("Content-Type", "text/html") + mastersComponent := app.ClusterMasters(*mastersData) + layoutComponent := layout.Layout(c, mastersComponent) + err = layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// ShowClusterFilers renders the cluster filers page +func (h *ClusterHandlers) ShowClusterFilers(c *gin.Context) { + // Get cluster filers data + filersData, err := h.adminServer.GetClusterFilers() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get cluster filers: " + err.Error()}) + return + } + + // Set username + username := c.GetString("username") + if username == "" { + username = "admin" + } + filersData.Username = username + + // Render HTML template + c.Header("Content-Type", "text/html") + filersComponent := app.ClusterFilers(*filersData) + layoutComponent := layout.Layout(c, filersComponent) + err = layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// GetClusterTopology returns the cluster topology as JSON +func (h *ClusterHandlers) GetClusterTopology(c *gin.Context) { + topology, err := h.adminServer.GetClusterTopology() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, topology) +} + +// GetMasters returns master node information +func (h *ClusterHandlers) GetMasters(c *gin.Context) { + // Simple master info + c.JSON(http.StatusOK, gin.H{"masters": []gin.H{{"address": "localhost:9333", "status": "active"}}}) +} + +// GetVolumeServers returns volume server information +func (h *ClusterHandlers) GetVolumeServers(c *gin.Context) { + topology, err := h.adminServer.GetClusterTopology() + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, gin.H{"volume_servers": topology.VolumeServers}) +} diff --git a/weed/admin/handlers/file_browser_handlers.go b/weed/admin/handlers/file_browser_handlers.go new file mode 100644 index 000000000..0f40188dc --- /dev/null +++ b/weed/admin/handlers/file_browser_handlers.go @@ -0,0 +1,447 @@ +package handlers + +import ( + "bytes" + "context" + "fmt" + "io" + "mime/multipart" + "net" + "net/http" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gin-gonic/gin" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" + "github.com/seaweedfs/seaweedfs/weed/admin/view/app" + "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" + "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" +) + +type FileBrowserHandlers struct { + adminServer *dash.AdminServer +} + +func NewFileBrowserHandlers(adminServer *dash.AdminServer) 
*FileBrowserHandlers { + return &FileBrowserHandlers{ + adminServer: adminServer, + } +} + +// ShowFileBrowser renders the file browser page +func (h *FileBrowserHandlers) ShowFileBrowser(c *gin.Context) { + // Get path from query parameter, default to root + path := c.DefaultQuery("path", "/") + + // Get file browser data + browserData, err := h.adminServer.GetFileBrowser(path) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get file browser data: " + err.Error()}) + return + } + + // Set username + username := c.GetString("username") + if username == "" { + username = "admin" + } + browserData.Username = username + + // Render HTML template + c.Header("Content-Type", "text/html") + browserComponent := app.FileBrowser(*browserData) + layoutComponent := layout.Layout(c, browserComponent) + err = layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// DeleteFile handles file deletion API requests +func (h *FileBrowserHandlers) DeleteFile(c *gin.Context) { + var request struct { + Path string `json:"path" binding:"required"` + } + + if err := c.ShouldBindJSON(&request); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) + return + } + + // Delete file via filer + err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ + Directory: filepath.Dir(request.Path), + Name: filepath.Base(request.Path), + IsDeleteData: true, + IsRecursive: true, + IgnoreRecursiveError: false, + }) + return err + }) + + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to delete file: " + err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "File deleted successfully"}) +} + +// DeleteMultipleFiles handles multiple file deletion API requests +func (h *FileBrowserHandlers) DeleteMultipleFiles(c *gin.Context) { + var request struct { + Paths []string `json:"paths" binding:"required"` + } + + if err := c.ShouldBindJSON(&request); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) + return + } + + if len(request.Paths) == 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "No paths provided"}) + return + } + + var deletedCount int + var failedCount int + var errors []string + + // Delete each file/folder + for _, path := range request.Paths { + err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ + Directory: filepath.Dir(path), + Name: filepath.Base(path), + IsDeleteData: true, + IsRecursive: true, + IgnoreRecursiveError: false, + }) + return err + }) + + if err != nil { + failedCount++ + errors = append(errors, fmt.Sprintf("%s: %v", path, err)) + } else { + deletedCount++ + } + } + + // Prepare response + response := map[string]interface{}{ + "deleted": deletedCount, + "failed": failedCount, + "total": len(request.Paths), + } + + if len(errors) > 0 { + response["errors"] = errors + } + + if deletedCount > 0 { + if failedCount == 0 { + response["message"] = fmt.Sprintf("Successfully deleted %d item(s)", deletedCount) + } else { + response["message"] = fmt.Sprintf("Deleted %d item(s), failed to delete %d item(s)", deletedCount, failedCount) + } + c.JSON(http.StatusOK, response) + } else { 
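+		// Nothing was deleted; return 500 so the client treats the request as failed and can display the collected errors.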
+ response["message"] = "Failed to delete all selected items" + c.JSON(http.StatusInternalServerError, response) + } +} + +// CreateFolder handles folder creation requests +func (h *FileBrowserHandlers) CreateFolder(c *gin.Context) { + var request struct { + Path string `json:"path" binding:"required"` + FolderName string `json:"folder_name" binding:"required"` + } + + if err := c.ShouldBindJSON(&request); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request: " + err.Error()}) + return + } + + // Clean and validate folder name + folderName := strings.TrimSpace(request.FolderName) + if folderName == "" || strings.Contains(folderName, "/") || strings.Contains(folderName, "\\") { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid folder name"}) + return + } + + // Create full path for new folder + fullPath := filepath.Join(request.Path, folderName) + if !strings.HasPrefix(fullPath, "/") { + fullPath = "/" + fullPath + } + + // Create folder via filer + err := h.adminServer.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + _, err := client.CreateEntry(context.Background(), &filer_pb.CreateEntryRequest{ + Directory: filepath.Dir(fullPath), + Entry: &filer_pb.Entry{ + Name: filepath.Base(fullPath), + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + FileMode: uint32(0755 | (1 << 31)), // Directory mode + Uid: uint32(1000), + Gid: uint32(1000), + Crtime: time.Now().Unix(), + Mtime: time.Now().Unix(), + TtlSec: 0, + }, + }, + }) + return err + }) + + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to create folder: " + err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Folder created successfully"}) +} + +// UploadFile handles file upload requests +func (h *FileBrowserHandlers) UploadFile(c *gin.Context) { + // Get the current path + currentPath := c.PostForm("path") + if currentPath == "" { + currentPath = "/" + } + + // Parse multipart form + err := c.Request.ParseMultipartForm(100 << 20) // 100MB max memory + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Failed to parse multipart form: " + err.Error()}) + return + } + + // Get uploaded files (supports multiple files) + files := c.Request.MultipartForm.File["files"] + if len(files) == 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "No files uploaded"}) + return + } + + var uploadResults []map[string]interface{} + var failedUploads []string + + // Process each uploaded file + for _, fileHeader := range files { + // Validate file name + fileName := fileHeader.Filename + if fileName == "" { + failedUploads = append(failedUploads, "invalid filename") + continue + } + + // Create full path for the file + fullPath := filepath.Join(currentPath, fileName) + if !strings.HasPrefix(fullPath, "/") { + fullPath = "/" + fullPath + } + + // Open the file + file, err := fileHeader.Open() + if err != nil { + failedUploads = append(failedUploads, fmt.Sprintf("%s: %v", fileName, err)) + continue + } + + // Upload file to filer + err = h.uploadFileToFiler(fullPath, fileHeader) + file.Close() + + if err != nil { + failedUploads = append(failedUploads, fmt.Sprintf("%s: %v", fileName, err)) + } else { + uploadResults = append(uploadResults, map[string]interface{}{ + "name": fileName, + "size": fileHeader.Size, + "path": fullPath, + }) + } + } + + // Prepare response + response := map[string]interface{}{ + "uploaded": len(uploadResults), + "failed": len(failedUploads), + "files": uploadResults, + } + + if len(failedUploads) > 0 { + 
response["errors"] = failedUploads + } + + if len(uploadResults) > 0 { + if len(failedUploads) == 0 { + response["message"] = fmt.Sprintf("Successfully uploaded %d file(s)", len(uploadResults)) + } else { + response["message"] = fmt.Sprintf("Uploaded %d file(s), %d failed", len(uploadResults), len(failedUploads)) + } + c.JSON(http.StatusOK, response) + } else { + response["message"] = "All file uploads failed" + c.JSON(http.StatusInternalServerError, response) + } +} + +// uploadFileToFiler uploads a file directly to the filer using multipart form data +func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *multipart.FileHeader) error { + // Get filer address from admin server + filerAddress := h.adminServer.GetFilerAddress() + if filerAddress == "" { + return fmt.Errorf("filer address not configured") + } + + // Validate and sanitize the filer address + if err := h.validateFilerAddress(filerAddress); err != nil { + return fmt.Errorf("invalid filer address: %v", err) + } + + // Validate and sanitize the file path + cleanFilePath, err := h.validateAndCleanFilePath(filePath) + if err != nil { + return fmt.Errorf("invalid file path: %v", err) + } + + // Open the file + file, err := fileHeader.Open() + if err != nil { + return fmt.Errorf("failed to open file: %v", err) + } + defer file.Close() + + // Create multipart form data + var body bytes.Buffer + writer := multipart.NewWriter(&body) + + // Create form file field + part, err := writer.CreateFormFile("file", fileHeader.Filename) + if err != nil { + return fmt.Errorf("failed to create form file: %v", err) + } + + // Copy file content to form + _, err = io.Copy(part, file) + if err != nil { + return fmt.Errorf("failed to copy file content: %v", err) + } + + // Close the writer to finalize the form + err = writer.Close() + if err != nil { + return fmt.Errorf("failed to close multipart writer: %v", err) + } + + // Create the upload URL with validated components + uploadURL := fmt.Sprintf("http://%s%s", filerAddress, cleanFilePath) + + // Create HTTP request + req, err := http.NewRequest("POST", uploadURL, &body) + if err != nil { + return fmt.Errorf("failed to create request: %v", err) + } + + // Set content type with boundary + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // Send request + client := &http.Client{Timeout: 60 * time.Second} // Increased timeout for larger files + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("failed to upload file: %v", err) + } + defer resp.Body.Close() + + // Check response + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + responseBody, _ := io.ReadAll(resp.Body) + return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(responseBody)) + } + + return nil +} + +// validateFilerAddress validates that the filer address is safe to use +func (h *FileBrowserHandlers) validateFilerAddress(address string) error { + if address == "" { + return fmt.Errorf("filer address cannot be empty") + } + + // Parse the address to validate it's a proper host:port format + host, port, err := net.SplitHostPort(address) + if err != nil { + return fmt.Errorf("invalid address format: %v", err) + } + + // Validate host is not empty + if host == "" { + return fmt.Errorf("host cannot be empty") + } + + // Validate port is numeric and in valid range + if port == "" { + return fmt.Errorf("port cannot be empty") + } + + portNum, err := strconv.Atoi(port) + if err != nil { + return fmt.Errorf("invalid port number: %v", err) + } 
+ + if portNum < 1 || portNum > 65535 { + return fmt.Errorf("port number must be between 1 and 65535") + } + + // Additional security: prevent private network access unless explicitly allowed + // This helps prevent SSRF attacks to internal services + ip := net.ParseIP(host) + if ip != nil { + // Check for localhost, private networks, and other dangerous addresses + if ip.IsLoopback() || ip.IsPrivate() || ip.IsUnspecified() { + // Only allow if it's the configured filer (trusted) + // In production, you might want to be more restrictive + glog.V(2).Infof("Allowing access to private/local address: %s (configured filer)", address) + } + } + + return nil +} + +// validateAndCleanFilePath validates and cleans the file path to prevent path traversal +func (h *FileBrowserHandlers) validateAndCleanFilePath(filePath string) (string, error) { + if filePath == "" { + return "", fmt.Errorf("file path cannot be empty") + } + + // Clean the path to remove any .. or . components + cleanPath := filepath.Clean(filePath) + + // Ensure the path starts with / + if !strings.HasPrefix(cleanPath, "/") { + cleanPath = "/" + cleanPath + } + + // Prevent path traversal attacks + if strings.Contains(cleanPath, "..") { + return "", fmt.Errorf("path traversal not allowed") + } + + // Additional validation: ensure path doesn't contain dangerous characters + if strings.ContainsAny(cleanPath, "\x00\r\n") { + return "", fmt.Errorf("path contains invalid characters") + } + + return cleanPath, nil +} diff --git a/weed/admin/handlers/handlers.go b/weed/admin/handlers/handlers.go new file mode 100644 index 000000000..2a7dc65a5 --- /dev/null +++ b/weed/admin/handlers/handlers.go @@ -0,0 +1,320 @@ +package handlers + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" + "github.com/seaweedfs/seaweedfs/weed/admin/view/app" + "github.com/seaweedfs/seaweedfs/weed/admin/view/layout" +) + +// AdminHandlers contains all the HTTP handlers for the admin interface +type AdminHandlers struct { + adminServer *dash.AdminServer + authHandlers *AuthHandlers + clusterHandlers *ClusterHandlers + fileBrowserHandlers *FileBrowserHandlers +} + +// NewAdminHandlers creates a new instance of AdminHandlers +func NewAdminHandlers(adminServer *dash.AdminServer) *AdminHandlers { + authHandlers := NewAuthHandlers(adminServer) + clusterHandlers := NewClusterHandlers(adminServer) + fileBrowserHandlers := NewFileBrowserHandlers(adminServer) + return &AdminHandlers{ + adminServer: adminServer, + authHandlers: authHandlers, + clusterHandlers: clusterHandlers, + fileBrowserHandlers: fileBrowserHandlers, + } +} + +// SetupRoutes configures all the routes for the admin interface +func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username, password string) { + // Health check (no auth required) + r.GET("/health", h.HealthCheck) + + if authRequired { + // Authentication routes (no auth required) + r.GET("/login", h.authHandlers.ShowLogin) + r.POST("/login", h.authHandlers.HandleLogin(username, password)) + r.GET("/logout", h.authHandlers.HandleLogout) + + // Protected routes group + protected := r.Group("/") + protected.Use(dash.RequireAuth()) + + // Main admin interface routes + protected.GET("/", h.ShowDashboard) + protected.GET("/admin", h.ShowDashboard) + + // Object Store management routes + protected.GET("/object-store/buckets", h.ShowS3Buckets) + protected.GET("/object-store/buckets/:bucket", h.ShowBucketDetails) + protected.GET("/object-store/users", h.ShowObjectStoreUsers) 
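+		// When authRequired is false, the same pages and API routes are registered again below without the RequireAuth middleware.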
+ + // File browser routes + protected.GET("/files", h.fileBrowserHandlers.ShowFileBrowser) + + // Cluster management routes + protected.GET("/cluster/masters", h.clusterHandlers.ShowClusterMasters) + protected.GET("/cluster/filers", h.clusterHandlers.ShowClusterFilers) + protected.GET("/cluster/volume-servers", h.clusterHandlers.ShowClusterVolumeServers) + protected.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes) + protected.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections) + + // API routes for AJAX calls + api := protected.Group("/api") + { + api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology) + api.GET("/cluster/masters", h.clusterHandlers.GetMasters) + api.GET("/cluster/volumes", h.clusterHandlers.GetVolumeServers) + api.GET("/admin", h.adminServer.ShowAdmin) // JSON API for admin data + + // S3 API routes + s3Api := api.Group("/s3") + { + s3Api.GET("/buckets", h.adminServer.ListBucketsAPI) + s3Api.POST("/buckets", h.adminServer.CreateBucket) + s3Api.DELETE("/buckets/:bucket", h.adminServer.DeleteBucket) + s3Api.GET("/buckets/:bucket", h.adminServer.ShowBucketDetails) + } + + // File management API routes + filesApi := api.Group("/files") + { + filesApi.DELETE("/delete", h.fileBrowserHandlers.DeleteFile) + filesApi.DELETE("/delete-multiple", h.fileBrowserHandlers.DeleteMultipleFiles) + filesApi.POST("/create-folder", h.fileBrowserHandlers.CreateFolder) + filesApi.POST("/upload", h.fileBrowserHandlers.UploadFile) + } + } + } else { + // No authentication required - all routes are public + r.GET("/", h.ShowDashboard) + r.GET("/admin", h.ShowDashboard) + + // Object Store management routes + r.GET("/object-store/buckets", h.ShowS3Buckets) + r.GET("/object-store/buckets/:bucket", h.ShowBucketDetails) + r.GET("/object-store/users", h.ShowObjectStoreUsers) + + // File browser routes + r.GET("/files", h.fileBrowserHandlers.ShowFileBrowser) + + // Cluster management routes + r.GET("/cluster/masters", h.clusterHandlers.ShowClusterMasters) + r.GET("/cluster/filers", h.clusterHandlers.ShowClusterFilers) + r.GET("/cluster/volume-servers", h.clusterHandlers.ShowClusterVolumeServers) + r.GET("/cluster/volumes", h.clusterHandlers.ShowClusterVolumes) + r.GET("/cluster/collections", h.clusterHandlers.ShowClusterCollections) + + // API routes for AJAX calls + api := r.Group("/api") + { + api.GET("/cluster/topology", h.clusterHandlers.GetClusterTopology) + api.GET("/cluster/masters", h.clusterHandlers.GetMasters) + api.GET("/cluster/volumes", h.clusterHandlers.GetVolumeServers) + api.GET("/admin", h.adminServer.ShowAdmin) // JSON API for admin data + + // S3 API routes + s3Api := api.Group("/s3") + { + s3Api.GET("/buckets", h.adminServer.ListBucketsAPI) + s3Api.POST("/buckets", h.adminServer.CreateBucket) + s3Api.DELETE("/buckets/:bucket", h.adminServer.DeleteBucket) + s3Api.GET("/buckets/:bucket", h.adminServer.ShowBucketDetails) + } + + // File management API routes + filesApi := api.Group("/files") + { + filesApi.DELETE("/delete", h.fileBrowserHandlers.DeleteFile) + filesApi.DELETE("/delete-multiple", h.fileBrowserHandlers.DeleteMultipleFiles) + filesApi.POST("/create-folder", h.fileBrowserHandlers.CreateFolder) + filesApi.POST("/upload", h.fileBrowserHandlers.UploadFile) + } + } + } +} + +// HealthCheck returns the health status of the admin interface +func (h *AdminHandlers) HealthCheck(c *gin.Context) { + c.JSON(200, gin.H{"status": "ok"}) +} + +// ShowDashboard renders the main admin dashboard +func (h *AdminHandlers) ShowDashboard(c *gin.Context) 
{ + // Get admin data from the server + adminData := h.getAdminData(c) + + // Render HTML template + c.Header("Content-Type", "text/html") + adminComponent := app.Admin(adminData) + layoutComponent := layout.Layout(c, adminComponent) + err := layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// ShowS3Buckets renders the S3 buckets management page +func (h *AdminHandlers) ShowS3Buckets(c *gin.Context) { + // Get S3 buckets data from the server + s3Data := h.getS3BucketsData(c) + + // Render HTML template + c.Header("Content-Type", "text/html") + s3Component := app.S3Buckets(s3Data) + layoutComponent := layout.Layout(c, s3Component) + err := layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// ShowBucketDetails returns detailed information about a specific bucket +func (h *AdminHandlers) ShowBucketDetails(c *gin.Context) { + bucketName := c.Param("bucket") + details, err := h.adminServer.GetBucketDetails(bucketName) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get bucket details: " + err.Error()}) + return + } + c.JSON(http.StatusOK, details) +} + +// ShowObjectStoreUsers renders the object store users management page +func (h *AdminHandlers) ShowObjectStoreUsers(c *gin.Context) { + // Get object store users data from the server + usersData := h.getObjectStoreUsersData(c) + + // Render HTML template + c.Header("Content-Type", "text/html") + usersComponent := app.ObjectStoreUsers(usersData) + layoutComponent := layout.Layout(c, usersComponent) + err := layoutComponent.Render(c.Request.Context(), c.Writer) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()}) + return + } +} + +// getS3BucketsData retrieves S3 buckets data from the server +func (h *AdminHandlers) getS3BucketsData(c *gin.Context) dash.S3BucketsData { + username := c.GetString("username") + if username == "" { + username = "admin" + } + + // Get S3 buckets + buckets, err := h.adminServer.GetS3Buckets() + if err != nil { + // Return empty data on error + return dash.S3BucketsData{ + Username: username, + Buckets: []dash.S3Bucket{}, + TotalBuckets: 0, + TotalSize: 0, + LastUpdated: time.Now(), + } + } + + // Calculate totals + var totalSize int64 + for _, bucket := range buckets { + totalSize += bucket.Size + } + + return dash.S3BucketsData{ + Username: username, + Buckets: buckets, + TotalBuckets: len(buckets), + TotalSize: totalSize, + LastUpdated: time.Now(), + } +} + +// getObjectStoreUsersData retrieves object store users data from the server +func (h *AdminHandlers) getObjectStoreUsersData(c *gin.Context) dash.ObjectStoreUsersData { + username := c.GetString("username") + if username == "" { + username = "admin" + } + + // Get object store users + users, err := h.adminServer.GetObjectStoreUsers() + if err != nil { + // Return empty data on error + return dash.ObjectStoreUsersData{ + Username: username, + Users: []dash.ObjectStoreUser{}, + TotalUsers: 0, + LastUpdated: time.Now(), + } + } + + return dash.ObjectStoreUsersData{ + Username: username, + Users: users, + TotalUsers: len(users), + LastUpdated: time.Now(), + } +} + +// getAdminData retrieves admin data from the server (now uses consolidated method) +func (h *AdminHandlers) 
getAdminData(c *gin.Context) dash.AdminData { + username := c.GetString("username") + + // Use the consolidated GetAdminData method from AdminServer + adminData, err := h.adminServer.GetAdminData(username) + if err != nil { + // Return default data when services are not available + if username == "" { + username = "admin" + } + + masterNodes := []dash.MasterNode{ + { + Address: "localhost:9333", + IsLeader: true, + Status: "unreachable", + }, + } + + return dash.AdminData{ + Username: username, + ClusterStatus: "warning", + TotalVolumes: 0, + TotalFiles: 0, + TotalSize: 0, + MasterNodes: masterNodes, + VolumeServers: []dash.VolumeServer{}, + FilerNodes: []dash.FilerNode{}, + DataCenters: []dash.DataCenter{}, + LastUpdated: time.Now(), + SystemHealth: "poor", + } + } + + return adminData +} + +// Helper functions +func (h *AdminHandlers) determineClusterStatus(topology *dash.ClusterTopology, masters []dash.MasterNode) string { + if len(topology.VolumeServers) == 0 { + return "warning" + } + return "healthy" +} + +func (h *AdminHandlers) determineSystemHealth(topology *dash.ClusterTopology, masters []dash.MasterNode) string { + if len(topology.VolumeServers) > 0 && len(masters) > 0 { + return "good" + } + return "fair" +} diff --git a/weed/admin/static/css/admin.css b/weed/admin/static/css/admin.css new file mode 100644 index 000000000..c69876060 --- /dev/null +++ b/weed/admin/static/css/admin.css @@ -0,0 +1,217 @@ +/* SeaweedFS Dashboard Custom Styles */ + +/* Sidebar Styles */ +.sidebar { + position: fixed; + top: 56px; + bottom: 0; + left: 0; + z-index: 100; + padding: 48px 0 0; + box-shadow: inset -1px 0 0 rgba(0, 0, 0, .1); +} + +.sidebar-heading { + font-size: .75rem; + text-transform: uppercase; +} + +.sidebar .nav-link { + font-weight: 500; + color: #333; +} + +.sidebar .nav-link:hover { + color: #007bff; +} + +.sidebar .nav-link.active { + color: #007bff; +} + +.sidebar .nav-link:hover .feather, +.sidebar .nav-link.active .feather { + color: inherit; +} + +/* Main content area */ +main { + margin-left: 240px; +} + +@media (max-width: 767.98px) { + .sidebar { + top: 5rem; + } + main { + margin-left: 0; + } +} + +/* Custom card styles */ +.border-left-primary { + border-left: 0.25rem solid #4e73df !important; +} + +.border-left-success { + border-left: 0.25rem solid #1cc88a !important; +} + +.border-left-info { + border-left: 0.25rem solid #36b9cc !important; +} + +.border-left-warning { + border-left: 0.25rem solid #f6c23e !important; +} + +.border-left-danger { + border-left: 0.25rem solid #e74a3b !important; +} + +/* Status badges */ +.badge { + font-size: 0.875em; +} + +/* Progress bars */ +.progress { + background-color: #f8f9fc; + border: 1px solid #e3e6f0; +} + +.progress-bar { + font-size: 0.75rem; + font-weight: 700; + color: #fff; + text-align: center; +} + +/* Tables */ +.table { + color: #5a5c69; +} + +.table thead th { + vertical-align: bottom; + border-bottom: 1px solid #e3e6f0; + font-weight: 700; + color: #5a5c69; + background-color: #f8f9fc; +} + +.table-bordered { + border: 1px solid #e3e6f0; +} + +.table-bordered th, +.table-bordered td { + border: 1px solid #e3e6f0; +} + +/* Cards */ +.card { + box-shadow: 0 0.15rem 1.75rem 0 rgba(58, 59, 69, 0.15) !important; + border: 1px solid #e3e6f0; +} + +.card-header { + background-color: #f8f9fc; + border-bottom: 1px solid #e3e6f0; +} + +/* Buttons */ +.btn-primary { + background-color: #4e73df; + border-color: #4e73df; +} + +.btn-primary:hover { + background-color: #2e59d9; + border-color: #2653d4; +} + +/* Text utilities 
*/ +.text-gray-800 { + color: #5a5c69 !important; +} + +.text-gray-300 { + color: #dddfeb !important; +} + +/* Animation for HTMX updates */ +.htmx-indicator { + opacity: 0; + transition: opacity 500ms ease-in; +} + +.htmx-request .htmx-indicator { + opacity: 1; +} + +.htmx-request.htmx-indicator { + opacity: 1; +} + +/* Loading spinner */ +.spinner-border-sm { + width: 1rem; + height: 1rem; +} + +/* Custom utilities */ +.bg-gradient-primary { + background: linear-gradient(180deg, #4e73df 10%, #224abe 100%); +} + +.shadow { + box-shadow: 0 0.15rem 1.75rem 0 rgba(58, 59, 69, 0.15) !important; +} + +/* Collapsible menu styles */ +.nav-link[data-bs-toggle="collapse"] { + position: relative; +} + +.nav-link[data-bs-toggle="collapse"] .fa-chevron-down { + transition: transform 0.2s ease; +} + +.nav-link[data-bs-toggle="collapse"][aria-expanded="true"] .fa-chevron-down { + transform: rotate(180deg); +} + +.nav-link[data-bs-toggle="collapse"]:not(.collapsed) { + color: #007bff; +} + +.nav-link[data-bs-toggle="collapse"]:not(.collapsed) .fa-chevron-down { + color: #007bff; +} + +/* Submenu styles */ +.nav .nav { + border-left: 1px solid #e3e6f0; + margin-left: 0.5rem; +} + +.nav .nav .nav-link { + font-size: 0.875rem; + padding-left: 1rem; +} + +.nav .nav .nav-link:hover { + background-color: #f8f9fc; +} + +/* Responsive adjustments */ +@media (max-width: 576px) { + .card-body { + padding: 1rem; + } + + .h5 { + font-size: 1rem; + } +} \ No newline at end of file diff --git a/weed/admin/static/favicon.ico b/weed/admin/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..1059a4099c279b0e0bf1ad51056a1dd472b02235 GIT binary patch literal 5558 zcmZQzU}RuoP*7k1(h3Y3f(#653=9kc0uX)%3j>26P)q|T22{e}0K_15U;tEu4o;Tp z{vXAoVSq0UoNIC#&@}S$_^SWU4=((Fs@z~e-0||n>i;0{>F$~T0}&Q5`$2B__3_pJ zYt!PX77kY?hW{so-(B7VvKkKl{r&y_;pT3txB+e`8H~3Vcl|$8V?mKS$gmbg%lB9J z{-1AkCEE=shLOQ}vTG{Yc7yW#m&aEL;@@B1VYuw~*Z2Q#%*mk0ZhU19vK(^y2RQ`f zW=g^w*=%Ci$o7NEqEi*dbn!RLZ6N#q{`&s^?uu%n>;>hC%e_HFsfF2rkAAeJk7)Bi z_JZ=n{WXnLvmc+^kmc?yD+h-YD2>q5eo%UNada8TS~#GW{h)mC`N2iFy)eepy|W0G z>!5NUIs8avU!R%$|JUcYFe}NTNwpKE{>JQdvg||G1T&8u`r+oD|Dd{%54L2bil2NuAcKyUkD?zp|E=>MM| zU+7x?!0d+67rT7_gYqVfKT41G?{K97^znd^#s`g)fQCgBfEYeb0vtm@2e`s-u!-MU zR`LH*k3Y$IAJT?d4lX}QZ5M#sAP*635Kw;s)K5jm_`>Pls@nhm{@|}i@#zQ2U7Zs9 z|L2F7VE2RiCFh!*P%yfFkeg3eo1ui$xkiWoAMcz5x0}$_qw{ah%lZH9#hw2byHUd$ zR1cw>3(^a!kC5?;qmZx$)fKlF7o)fX-7XNH5V*Ud3a=TUJ_IuUaN`KX$bbL-KifBl r +
+ + ${message} +
+ + + `; + + // Add to toast container or create one + let toastContainer = document.getElementById('toast-container'); + if (!toastContainer) { + toastContainer = document.createElement('div'); + toastContainer.id = 'toast-container'; + toastContainer.className = 'toast-container position-fixed top-0 end-0 p-3'; + toastContainer.style.zIndex = '1055'; + document.body.appendChild(toastContainer); + } + + toastContainer.appendChild(toast); + + // Show toast + const bsToast = new bootstrap.Toast(toast); + bsToast.show(); + + // Remove toast element after it's hidden + toast.addEventListener('hidden.bs.toast', function() { + toast.remove(); + }); +} + +function showSuccessMessage(message) { + // Similar to showErrorMessage but with success styling + const toast = document.createElement('div'); + toast.className = 'toast align-items-center text-white bg-success border-0'; + toast.setAttribute('role', 'alert'); + toast.setAttribute('aria-live', 'assertive'); + toast.setAttribute('aria-atomic', 'true'); + + toast.innerHTML = ` +
+        <div class="d-flex">
+            <div class="toast-body">
+                ${message}
+            </div>
+            <button type="button" class="btn-close btn-close-white me-2 m-auto" data-bs-dismiss="toast" aria-label="Close"></button>
+        </div>
+ `; + + let toastContainer = document.getElementById('toast-container'); + if (!toastContainer) { + toastContainer = document.createElement('div'); + toastContainer.id = 'toast-container'; + toastContainer.className = 'toast-container position-fixed top-0 end-0 p-3'; + toastContainer.style.zIndex = '1055'; + document.body.appendChild(toastContainer); + } + + toastContainer.appendChild(toast); + + const bsToast = new bootstrap.Toast(toast); + bsToast.show(); + + toast.addEventListener('hidden.bs.toast', function() { + toast.remove(); + }); +} + +// Format bytes for display +function formatBytes(bytes, decimals = 2) { + if (bytes === 0) return '0 Bytes'; + + const k = 1024; + const dm = decimals < 0 ? 0 : decimals; + const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB']; + + const i = Math.floor(Math.log(bytes) / Math.log(k)); + + return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i]; +} + +// Format numbers with commas +function formatNumber(num) { + return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ","); +} + +// Confirm action dialogs +function confirmAction(message, callback) { + if (confirm(message)) { + callback(); + } +} + +// Global error handler +window.addEventListener('error', function(e) { + console.error('Global error:', e.error); + showErrorMessage('An unexpected error occurred.'); +}); + +// Export functions for global use +window.Dashboard = { + showErrorMessage, + showSuccessMessage, + formatBytes, + formatNumber, + confirmAction +}; + +// Initialize event handlers +function initializeEventHandlers() { + // S3 Bucket Management + const createBucketForm = document.getElementById('createBucketForm'); + if (createBucketForm) { + createBucketForm.addEventListener('submit', handleCreateBucket); + } + + // Delete bucket buttons + document.addEventListener('click', function(e) { + if (e.target.closest('.delete-bucket-btn')) { + const button = e.target.closest('.delete-bucket-btn'); + const bucketName = button.getAttribute('data-bucket-name'); + confirmDeleteBucket(bucketName); + } + }); +} + +// Setup form validation +function setupFormValidation() { + // Bucket name validation + const bucketNameInput = document.getElementById('bucketName'); + if (bucketNameInput) { + bucketNameInput.addEventListener('input', validateBucketName); + } +} + +// S3 Bucket Management Functions + +// Handle create bucket form submission +async function handleCreateBucket(event) { + event.preventDefault(); + + const form = event.target; + const formData = new FormData(form); + const bucketData = { + name: formData.get('name'), + region: formData.get('region') || 'us-east-1' + }; + + try { + const response = await fetch('/api/s3/buckets', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(bucketData) + }); + + const result = await response.json(); + + if (response.ok) { + // Success + showAlert('success', `Bucket "${bucketData.name}" created successfully!`); + + // Close modal + const modal = bootstrap.Modal.getInstance(document.getElementById('createBucketModal')); + modal.hide(); + + // Reset form + form.reset(); + + // Refresh the page after a short delay + setTimeout(() => { + location.reload(); + }, 1500); + } else { + // Error + showAlert('danger', result.error || 'Failed to create bucket'); + } + } catch (error) { + console.error('Error creating bucket:', error); + showAlert('danger', 'Network error occurred while creating bucket'); + } +} + +// Validate bucket name input +function validateBucketName(event) { + const input = 
event.target; + const value = input.value; + const isValid = /^[a-z0-9.-]+$/.test(value) && value.length >= 3 && value.length <= 63; + + if (value.length > 0 && !isValid) { + input.setCustomValidity('Bucket name must contain only lowercase letters, numbers, dots, and hyphens (3-63 characters)'); + } else { + input.setCustomValidity(''); + } +} + +// Confirm bucket deletion +function confirmDeleteBucket(bucketName) { + bucketToDelete = bucketName; + document.getElementById('deleteBucketName').textContent = bucketName; + + const modal = new bootstrap.Modal(document.getElementById('deleteBucketModal')); + modal.show(); +} + +// Delete bucket +async function deleteBucket() { + if (!bucketToDelete) { + return; + } + + try { + const response = await fetch(`/api/s3/buckets/${bucketToDelete}`, { + method: 'DELETE' + }); + + const result = await response.json(); + + if (response.ok) { + // Success + showAlert('success', `Bucket "${bucketToDelete}" deleted successfully!`); + + // Close modal + const modal = bootstrap.Modal.getInstance(document.getElementById('deleteBucketModal')); + modal.hide(); + + // Refresh the page after a short delay + setTimeout(() => { + location.reload(); + }, 1500); + } else { + // Error + showAlert('danger', result.error || 'Failed to delete bucket'); + } + } catch (error) { + console.error('Error deleting bucket:', error); + showAlert('danger', 'Network error occurred while deleting bucket'); + } + + bucketToDelete = ''; +} + +// Refresh buckets list +function refreshBuckets() { + location.reload(); +} + +// Export bucket list +function exportBucketList() { + // Get table data + const table = document.getElementById('bucketsTable'); + if (!table) return; + + const rows = Array.from(table.querySelectorAll('tbody tr')); + const data = rows.map(row => { + const cells = row.querySelectorAll('td'); + if (cells.length < 5) return null; // Skip empty state row + + return { + name: cells[0].textContent.trim(), + created: cells[1].textContent.trim(), + objects: cells[2].textContent.trim(), + size: cells[3].textContent.trim(), + status: cells[4].textContent.trim() + }; + }).filter(item => item !== null); + + // Convert to CSV + const csv = [ + ['Name', 'Created', 'Objects', 'Size', 'Status'].join(','), + ...data.map(row => [ + row.name, + row.created, + row.objects, + row.size, + row.status + ].join(',')) + ].join('\n'); + + // Download CSV + const blob = new Blob([csv], { type: 'text/csv' }); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `seaweedfs-buckets-${new Date().toISOString().split('T')[0]}.csv`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + window.URL.revokeObjectURL(url); +} + +// Show alert message +function showAlert(type, message) { + // Remove existing alerts + const existingAlerts = document.querySelectorAll('.alert-floating'); + existingAlerts.forEach(alert => alert.remove()); + + // Create new alert + const alert = document.createElement('div'); + alert.className = `alert alert-${type} alert-dismissible fade show alert-floating`; + alert.style.cssText = ` + position: fixed; + top: 20px; + right: 20px; + z-index: 9999; + min-width: 300px; + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); + `; + + alert.innerHTML = ` + ${message} + + `; + + document.body.appendChild(alert); + + // Auto-remove after 5 seconds + setTimeout(() => { + if (alert.parentNode) { + alert.remove(); + } + }, 5000); +} + +// Format date for display +function formatDate(date) { + return new 
Date(date).toLocaleString(); +} + +// Copy text to clipboard +function copyToClipboard(text) { + navigator.clipboard.writeText(text).then(() => { + showAlert('success', 'Copied to clipboard!'); + }).catch(err => { + console.error('Failed to copy text: ', err); + showAlert('danger', 'Failed to copy to clipboard'); + }); +} + +// Dashboard refresh functionality +function refreshDashboard() { + location.reload(); +} + +// Cluster management functions + +// Export volume servers data as CSV +function exportVolumeServers() { + const table = document.getElementById('hostsTable'); + if (!table) { + showErrorMessage('No volume servers data to export'); + return; + } + + let csv = 'Server ID,Address,Data Center,Rack,Volumes,Capacity,Usage,Status\n'; + + const rows = table.querySelectorAll('tbody tr'); + rows.forEach(row => { + const cells = row.querySelectorAll('td'); + if (cells.length >= 8) { + const rowData = [ + cells[0].textContent.trim(), + cells[1].textContent.trim(), + cells[2].textContent.trim(), + cells[3].textContent.trim(), + cells[4].textContent.trim(), + cells[5].textContent.trim(), + cells[6].textContent.trim(), + cells[7].textContent.trim() + ]; + csv += rowData.join(',') + '\n'; + } + }); + + downloadCSV(csv, 'seaweedfs-volume-servers.csv'); +} + +// Export volumes data as CSV +function exportVolumes() { + const table = document.getElementById('volumesTable'); + if (!table) { + showErrorMessage('No volumes data to export'); + return; + } + + let csv = 'Volume ID,Server,Data Center,Rack,Collection,Size,File Count,Replication,Status\n'; + + const rows = table.querySelectorAll('tbody tr'); + rows.forEach(row => { + const cells = row.querySelectorAll('td'); + if (cells.length >= 9) { + const rowData = [ + cells[0].textContent.trim(), + cells[1].textContent.trim(), + cells[2].textContent.trim(), + cells[3].textContent.trim(), + cells[4].textContent.trim(), + cells[5].textContent.trim(), + cells[6].textContent.trim(), + cells[7].textContent.trim(), + cells[8].textContent.trim() + ]; + csv += rowData.join(',') + '\n'; + } + }); + + downloadCSV(csv, 'seaweedfs-volumes.csv'); +} + +// Export collections data as CSV +function exportCollections() { + const table = document.getElementById('collectionsTable'); + if (!table) { + showAlert('error', 'Collections table not found'); + return; + } + + const headers = ['Collection Name', 'Data Center', 'Replication', 'Volume Count', 'TTL', 'Disk Type', 'Status']; + const rows = []; + + // Get table rows + const tableRows = table.querySelectorAll('tbody tr'); + tableRows.forEach(row => { + const cells = row.querySelectorAll('td'); + if (cells.length >= 7) { + rows.push([ + cells[0].textContent.trim(), + cells[1].textContent.trim(), + cells[2].textContent.trim(), + cells[3].textContent.trim(), + cells[4].textContent.trim(), + cells[5].textContent.trim(), + cells[6].textContent.trim() + ]); + } + }); + + // Generate CSV + const csvContent = [headers, ...rows] + .map(row => row.map(cell => `"${cell}"`).join(',')) + .join('\n'); + + // Download + const filename = `seaweedfs-collections-${new Date().toISOString().split('T')[0]}.csv`; + downloadCSV(csvContent, filename); +} + +// Export Masters to CSV +function exportMasters() { + const table = document.getElementById('mastersTable'); + if (!table) { + showAlert('error', 'Masters table not found'); + return; + } + + const headers = ['Address', 'Role', 'Suffrage', 'Status']; + const rows = []; + + // Get table rows + const tableRows = table.querySelectorAll('tbody tr'); + tableRows.forEach(row => { + const 
cells = row.querySelectorAll('td'); + if (cells.length >= 4) { + rows.push([ + cells[0].textContent.trim(), + cells[1].textContent.trim(), + cells[2].textContent.trim(), + cells[3].textContent.trim() + ]); + } + }); + + // Generate CSV + const csvContent = [headers, ...rows] + .map(row => row.map(cell => `"${cell}"`).join(',')) + .join('\n'); + + // Download + const filename = `seaweedfs-masters-${new Date().toISOString().split('T')[0]}.csv`; + downloadCSV(csvContent, filename); +} + +// Export Filers to CSV +function exportFilers() { + const table = document.getElementById('filersTable'); + if (!table) { + showAlert('error', 'Filers table not found'); + return; + } + + const headers = ['Address', 'Version', 'Data Center', 'Rack', 'Created At', 'Status']; + const rows = []; + + // Get table rows + const tableRows = table.querySelectorAll('tbody tr'); + tableRows.forEach(row => { + const cells = row.querySelectorAll('td'); + if (cells.length >= 6) { + rows.push([ + cells[0].textContent.trim(), + cells[1].textContent.trim(), + cells[2].textContent.trim(), + cells[3].textContent.trim(), + cells[4].textContent.trim(), + cells[5].textContent.trim() + ]); + } + }); + + // Generate CSV + const csvContent = [headers, ...rows] + .map(row => row.map(cell => `"${cell}"`).join(',')) + .join('\n'); + + // Download + const filename = `seaweedfs-filers-${new Date().toISOString().split('T')[0]}.csv`; + downloadCSV(csvContent, filename); +} + +// Export Users to CSV +function exportUsers() { + const table = document.getElementById('usersTable'); + if (!table) { + showAlert('error', 'Users table not found'); + return; + } + + const headers = ['Username', 'Email', 'Access Key', 'Status', 'Created', 'Last Login']; + const rows = []; + + // Get table rows + const tableRows = table.querySelectorAll('tbody tr'); + tableRows.forEach(row => { + const cells = row.querySelectorAll('td'); + if (cells.length >= 6) { + rows.push([ + cells[0].textContent.trim(), + cells[1].textContent.trim(), + cells[2].textContent.trim(), + cells[3].textContent.trim(), + cells[4].textContent.trim(), + cells[5].textContent.trim() + ]); + } + }); + + // Generate CSV + const csvContent = [headers, ...rows] + .map(row => row.map(cell => `"${cell}"`).join(',')) + .join('\n'); + + // Download + const filename = `seaweedfs-users-${new Date().toISOString().split('T')[0]}.csv`; + downloadCSV(csvContent, filename); +} + +// Confirm delete collection +function confirmDeleteCollection(button) { + const collectionName = button.getAttribute('data-collection-name'); + document.getElementById('deleteCollectionName').textContent = collectionName; + + const modal = new bootstrap.Modal(document.getElementById('deleteCollectionModal')); + modal.show(); + + // Set up confirm button + document.getElementById('confirmDeleteCollection').onclick = function() { + deleteCollection(collectionName); + }; +} + +// Delete collection +async function deleteCollection(collectionName) { + try { + const response = await fetch(`/api/collections/${collectionName}`, { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json', + } + }); + + if (response.ok) { + showSuccessMessage(`Collection "${collectionName}" deleted successfully`); + // Hide modal + const modal = bootstrap.Modal.getInstance(document.getElementById('deleteCollectionModal')); + modal.hide(); + // Refresh page + setTimeout(() => { + window.location.reload(); + }, 1000); + } else { + const error = await response.json(); + showErrorMessage(`Failed to delete collection: ${error.error || 'Unknown 
error'}`); + } + } catch (error) { + console.error('Error deleting collection:', error); + showErrorMessage('Failed to delete collection. Please try again.'); + } +} + +// Handle create collection form submission +document.addEventListener('DOMContentLoaded', function() { + const createCollectionForm = document.getElementById('createCollectionForm'); + if (createCollectionForm) { + createCollectionForm.addEventListener('submit', handleCreateCollection); + } +}); + +async function handleCreateCollection(event) { + event.preventDefault(); + + const formData = new FormData(event.target); + const collectionData = { + name: formData.get('name'), + replication: formData.get('replication'), + ttl: formData.get('ttl'), + diskType: formData.get('diskType') + }; + + try { + const response = await fetch('/api/collections', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(collectionData) + }); + + if (response.ok) { + showSuccessMessage(`Collection "${collectionData.name}" created successfully`); + // Hide modal + const modal = bootstrap.Modal.getInstance(document.getElementById('createCollectionModal')); + modal.hide(); + // Reset form + event.target.reset(); + // Refresh page + setTimeout(() => { + window.location.reload(); + }, 1000); + } else { + const error = await response.json(); + showErrorMessage(`Failed to create collection: ${error.error || 'Unknown error'}`); + } + } catch (error) { + console.error('Error creating collection:', error); + showErrorMessage('Failed to create collection. Please try again.'); + } +} + +// Download CSV utility function +function downloadCSV(csvContent, filename) { + const blob = new Blob([csvContent], { type: 'text/csv;charset=utf-8;' }); + const link = document.createElement('a'); + + if (link.download !== undefined) { + const url = URL.createObjectURL(blob); + link.setAttribute('href', url); + link.setAttribute('download', filename); + link.style.visibility = 'hidden'; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + } +} + +// File Browser Functions + +// Toggle select all checkboxes +function toggleSelectAll() { + const selectAll = document.getElementById('selectAll'); + const checkboxes = document.querySelectorAll('.file-checkbox'); + + checkboxes.forEach(checkbox => { + checkbox.checked = selectAll.checked; + }); + + updateDeleteSelectedButton(); +} + +// Update visibility of delete selected button based on selection +function updateDeleteSelectedButton() { + const checkboxes = document.querySelectorAll('.file-checkbox:checked'); + const deleteBtn = document.getElementById('deleteSelectedBtn'); + + if (deleteBtn) { + if (checkboxes.length > 0) { + deleteBtn.style.display = 'inline-block'; + deleteBtn.innerHTML = `Delete Selected (${checkboxes.length})`; + } else { + deleteBtn.style.display = 'none'; + } + } +} + +// Update select all checkbox state based on individual selections +function updateSelectAllCheckbox() { + const selectAll = document.getElementById('selectAll'); + const allCheckboxes = document.querySelectorAll('.file-checkbox'); + const checkedCheckboxes = document.querySelectorAll('.file-checkbox:checked'); + + if (selectAll && allCheckboxes.length > 0) { + if (checkedCheckboxes.length === 0) { + selectAll.checked = false; + selectAll.indeterminate = false; + } else if (checkedCheckboxes.length === allCheckboxes.length) { + selectAll.checked = true; + selectAll.indeterminate = false; + } else { + selectAll.checked = false; + selectAll.indeterminate = true; 
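+            // Some but not all rows are selected: the indeterminate flag renders the checkbox's "partially selected" dash state.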
+ } + } +} + +// Get selected file paths +function getSelectedFilePaths() { + const checkboxes = document.querySelectorAll('.file-checkbox:checked'); + return Array.from(checkboxes).map(cb => cb.value); +} + +// Confirm delete selected files +function confirmDeleteSelected() { + const selectedPaths = getSelectedFilePaths(); + + if (selectedPaths.length === 0) { + showAlert('warning', 'No files selected'); + return; + } + + const fileNames = selectedPaths.map(path => path.split('/').pop()).join(', '); + const message = selectedPaths.length === 1 + ? `Are you sure you want to delete "${fileNames}"?` + : `Are you sure you want to delete ${selectedPaths.length} selected items?\n\n${fileNames.substring(0, 200)}${fileNames.length > 200 ? '...' : ''}`; + + if (confirm(message)) { + deleteSelectedFiles(selectedPaths); + } +} + +// Delete multiple selected files +async function deleteSelectedFiles(filePaths) { + if (!filePaths || filePaths.length === 0) { + showAlert('warning', 'No files selected'); + return; + } + + // Disable the delete button during operation + const deleteBtn = document.getElementById('deleteSelectedBtn'); + const originalText = deleteBtn.innerHTML; + deleteBtn.disabled = true; + deleteBtn.innerHTML = 'Deleting...'; + + try { + const response = await fetch('/api/files/delete-multiple', { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ paths: filePaths }) + }); + + if (response.ok) { + const result = await response.json(); + + if (result.deleted > 0) { + if (result.failed === 0) { + showAlert('success', `Successfully deleted ${result.deleted} item(s)`); + } else { + showAlert('warning', `Deleted ${result.deleted} item(s), failed to delete ${result.failed} item(s)`); + if (result.errors && result.errors.length > 0) { + console.warn('Deletion errors:', result.errors); + } + } + + // Reload the page to update the file list + setTimeout(() => { + window.location.reload(); + }, 1000); + } else { + let errorMessage = result.message || 'Failed to delete all selected items'; + if (result.errors && result.errors.length > 0) { + errorMessage += ': ' + result.errors.join(', '); + } + showAlert('error', errorMessage); + } + } else { + const error = await response.json(); + showAlert('error', `Failed to delete files: ${error.error || 'Unknown error'}`); + } + } catch (error) { + console.error('Delete error:', error); + showAlert('error', 'Failed to delete files'); + } finally { + // Re-enable the button + deleteBtn.disabled = false; + deleteBtn.innerHTML = originalText; + } +} + +// Create new folder +function createFolder() { + const modal = new bootstrap.Modal(document.getElementById('createFolderModal')); + modal.show(); +} + +// Upload file +function uploadFile() { + const modal = new bootstrap.Modal(document.getElementById('uploadFileModal')); + modal.show(); +} + +// Submit create folder form +async function submitCreateFolder() { + const folderName = document.getElementById('folderName').value.trim(); + const currentPath = document.getElementById('currentPath').value; + + if (!folderName) { + showErrorMessage('Please enter a folder name'); + return; + } + + // Validate folder name + if (folderName.includes('/') || folderName.includes('\\')) { + showErrorMessage('Folder names cannot contain / or \\ characters'); + return; + } + + // Additional validation for reserved names + const reservedNames = ['.', '..', 'CON', 'PRN', 'AUX', 'NUL']; + if (reservedNames.includes(folderName.toUpperCase())) { + showErrorMessage('This folder name is 
reserved and cannot be used'); + return; + } + + // Disable the button to prevent double submission + const submitButton = document.querySelector('#createFolderModal .btn-primary'); + const originalText = submitButton.innerHTML; + submitButton.disabled = true; + submitButton.innerHTML = 'Creating...'; + + try { + const response = await fetch('/api/files/create-folder', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + path: currentPath, + folder_name: folderName + }) + }); + + if (response.ok) { + showSuccessMessage(`Folder "${folderName}" created successfully`); + // Hide modal + const modal = bootstrap.Modal.getInstance(document.getElementById('createFolderModal')); + modal.hide(); + // Clear form + document.getElementById('folderName').value = ''; + // Refresh page + setTimeout(() => { + window.location.reload(); + }, 1000); + } else { + const error = await response.json(); + showErrorMessage(`Failed to create folder: ${error.error || 'Unknown error'}`); + } + } catch (error) { + console.error('Create folder error:', error); + showErrorMessage('Failed to create folder. Please try again.'); + } finally { + // Re-enable the button + submitButton.disabled = false; + submitButton.innerHTML = originalText; + } +} + +// Submit upload file form +async function submitUploadFile() { + const fileInput = document.getElementById('fileInput'); + const currentPath = document.getElementById('uploadPath').value; + + if (!fileInput.files || fileInput.files.length === 0) { + showErrorMessage('Please select at least one file to upload'); + return; + } + + const files = Array.from(fileInput.files); + const totalSize = files.reduce((sum, file) => sum + file.size, 0); + + // Validate total file size (limit to 500MB for admin interface) + const maxSize = 500 * 1024 * 1024; // 500MB total + if (totalSize > maxSize) { + showErrorMessage('Total file size exceeds 500MB limit. 
Please select fewer or smaller files.'); + return; + } + + // Validate individual file sizes + const maxIndividualSize = 100 * 1024 * 1024; // 100MB per file + const oversizedFiles = files.filter(file => file.size > maxIndividualSize); + if (oversizedFiles.length > 0) { + showErrorMessage(`Some files exceed 100MB limit: ${oversizedFiles.map(f => f.name).join(', ')}`); + return; + } + + const formData = new FormData(); + files.forEach(file => { + formData.append('files', file); + }); + formData.append('path', currentPath); + + // Show progress bar and disable button + const progressContainer = document.getElementById('uploadProgress'); + const progressBar = progressContainer.querySelector('.progress-bar'); + const uploadStatus = document.getElementById('uploadStatus'); + const submitButton = document.querySelector('#uploadFileModal .btn-primary'); + const originalText = submitButton.innerHTML; + + progressContainer.style.display = 'block'; + progressBar.style.width = '0%'; + progressBar.setAttribute('aria-valuenow', '0'); + progressBar.textContent = '0%'; + uploadStatus.textContent = `Uploading ${files.length} file(s)...`; + submitButton.disabled = true; + submitButton.innerHTML = 'Uploading...'; + + try { + const xhr = new XMLHttpRequest(); + + // Handle progress + xhr.upload.addEventListener('progress', function(e) { + if (e.lengthComputable) { + const percentComplete = Math.round((e.loaded / e.total) * 100); + progressBar.style.width = percentComplete + '%'; + progressBar.setAttribute('aria-valuenow', percentComplete); + progressBar.textContent = percentComplete + '%'; + uploadStatus.textContent = `Uploading ${files.length} file(s)... ${percentComplete}%`; + } + }); + + // Handle completion + xhr.addEventListener('load', function() { + if (xhr.status === 200) { + try { + const response = JSON.parse(xhr.responseText); + + if (response.uploaded > 0) { + if (response.failed === 0) { + showSuccessMessage(`Successfully uploaded ${response.uploaded} file(s)`); + } else { + showSuccessMessage(response.message); + // Show details of failed uploads + if (response.errors && response.errors.length > 0) { + console.warn('Upload errors:', response.errors); + } + } + + // Hide modal and refresh page + const modal = bootstrap.Modal.getInstance(document.getElementById('uploadFileModal')); + modal.hide(); + setTimeout(() => { + window.location.reload(); + }, 1000); + } else { + let errorMessage = response.message || 'All file uploads failed'; + if (response.errors && response.errors.length > 0) { + errorMessage += ': ' + response.errors.join(', '); + } + showErrorMessage(errorMessage); + } + } catch (e) { + showErrorMessage('Upload completed but response format was unexpected'); + } + progressContainer.style.display = 'none'; + } else { + let errorMessage = 'Unknown error'; + try { + const error = JSON.parse(xhr.responseText); + errorMessage = error.error || error.message || errorMessage; + } catch (e) { + errorMessage = `Server returned status ${xhr.status}`; + } + showErrorMessage(`Failed to upload files: ${errorMessage}`); + progressContainer.style.display = 'none'; + } + }); + + // Handle errors + xhr.addEventListener('error', function() { + showErrorMessage('Failed to upload files. 
Please check your connection and try again.'); + progressContainer.style.display = 'none'; + }); + + // Handle abort + xhr.addEventListener('abort', function() { + showErrorMessage('File upload was cancelled.'); + progressContainer.style.display = 'none'; + }); + + // Send request + xhr.open('POST', '/api/files/upload'); + xhr.send(formData); + + } catch (error) { + console.error('Upload error:', error); + showErrorMessage('Failed to upload files. Please try again.'); + progressContainer.style.display = 'none'; + } finally { + // Re-enable the button + submitButton.disabled = false; + submitButton.innerHTML = originalText; + } +} + +// Export file list to CSV +function exportFileList() { + const table = document.getElementById('fileTable'); + if (!table) { + showAlert('error', 'File table not found'); + return; + } + + const headers = ['Name', 'Size', 'Type', 'Modified', 'Permissions']; + const rows = []; + + // Get table rows + const tableRows = table.querySelectorAll('tbody tr'); + tableRows.forEach(row => { + const cells = row.querySelectorAll('td'); + if (cells.length >= 6) { + rows.push([ + cells[1].textContent.trim(), // Name + cells[2].textContent.trim(), // Size + cells[3].textContent.trim(), // Type + cells[4].textContent.trim(), // Modified + cells[5].textContent.trim() // Permissions + ]); + } + }); + + // Generate CSV + const csvContent = [headers, ...rows] + .map(row => row.map(cell => `"${cell}"`).join(',')) + .join('\n'); + + // Download + const filename = `seaweedfs-files-${new Date().toISOString().split('T')[0]}.csv`; + downloadCSV(csvContent, filename); +} + +// Download file +function downloadFile(filePath) { + // Create download link using filer direct access + const downloadUrl = `/files/download?path=${encodeURIComponent(filePath)}`; + window.open(downloadUrl, '_blank'); +} + +// View file +function viewFile(filePath) { + // TODO: Implement file viewer functionality + showAlert('info', `File viewer for ${filePath} will be implemented`); +} + +// Show file properties +function showProperties(filePath) { + // TODO: Implement file properties modal + showAlert('info', `Properties for ${filePath} will be implemented`); +} + +// Confirm delete file/folder +function confirmDelete(filePath) { + if (confirm(`Are you sure you want to delete "${filePath}"?`)) { + deleteFile(filePath); + } +} + +// Delete file/folder +async function deleteFile(filePath) { + try { + const response = await fetch('/api/files/delete', { + method: 'DELETE', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ path: filePath }) + }); + + if (response.ok) { + showAlert('success', `Successfully deleted "${filePath}"`); + // Reload the page to update the file list + window.location.reload(); + } else { + const error = await response.json(); + showAlert('error', `Failed to delete file: ${error.error || 'Unknown error'}`); + } + } catch (error) { + console.error('Delete error:', error); + showAlert('error', 'Failed to delete file'); + } +} + +// Setup file manager specific event handlers +function setupFileManagerEventHandlers() { + // Handle Enter key in folder name input + const folderNameInput = document.getElementById('folderName'); + if (folderNameInput) { + folderNameInput.addEventListener('keypress', function(e) { + if (e.key === 'Enter') { + e.preventDefault(); + submitCreateFolder(); + } + }); + } + + // Handle file selection change to show preview + const fileInput = document.getElementById('fileInput'); + if (fileInput) { + fileInput.addEventListener('change', 
function(e) { + updateFileListPreview(); + }); + } + + // Setup checkbox event listeners for file selection + const checkboxes = document.querySelectorAll('.file-checkbox'); + checkboxes.forEach(checkbox => { + checkbox.addEventListener('change', function() { + updateDeleteSelectedButton(); + updateSelectAllCheckbox(); + }); + }); + + // Setup drag and drop for file uploads + setupDragAndDrop(); + + // Clear form when modals are hidden + const createFolderModal = document.getElementById('createFolderModal'); + if (createFolderModal) { + createFolderModal.addEventListener('hidden.bs.modal', function() { + document.getElementById('folderName').value = ''; + }); + } + + const uploadFileModal = document.getElementById('uploadFileModal'); + if (uploadFileModal) { + uploadFileModal.addEventListener('hidden.bs.modal', function() { + const fileInput = document.getElementById('fileInput'); + const progressContainer = document.getElementById('uploadProgress'); + const fileListPreview = document.getElementById('fileListPreview'); + fileInput.value = ''; + progressContainer.style.display = 'none'; + fileListPreview.style.display = 'none'; + }); + } +} + +// Setup drag and drop functionality +function setupDragAndDrop() { + const dropZone = document.querySelector('.card-body'); // Main file listing area + const uploadModal = document.getElementById('uploadFileModal'); + + if (!dropZone || !uploadModal) return; + + // Prevent default drag behaviors + ['dragenter', 'dragover', 'dragleave', 'drop'].forEach(eventName => { + dropZone.addEventListener(eventName, preventDefaults, false); + document.body.addEventListener(eventName, preventDefaults, false); + }); + + // Highlight drop zone when item is dragged over it + ['dragenter', 'dragover'].forEach(eventName => { + dropZone.addEventListener(eventName, highlight, false); + }); + + ['dragleave', 'drop'].forEach(eventName => { + dropZone.addEventListener(eventName, unhighlight, false); + }); + + // Handle dropped files + dropZone.addEventListener('drop', handleDrop, false); + + function preventDefaults(e) { + e.preventDefault(); + e.stopPropagation(); + } + + function highlight(e) { + dropZone.classList.add('drag-over'); + // Add some visual feedback + if (!dropZone.querySelector('.drag-overlay')) { + const overlay = document.createElement('div'); + overlay.className = 'drag-overlay'; + overlay.innerHTML = ` +
+                    Drop files here to upload
+                    Release to upload files to this directory
+ `; + overlay.style.cssText = ` + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: rgba(255, 255, 255, 0.9); + border: 2px dashed #007bff; + border-radius: 0.375rem; + z-index: 1000; + display: flex; + align-items: center; + justify-content: center; + `; + dropZone.style.position = 'relative'; + dropZone.appendChild(overlay); + } + } + + function unhighlight(e) { + dropZone.classList.remove('drag-over'); + const overlay = dropZone.querySelector('.drag-overlay'); + if (overlay) { + overlay.remove(); + } + } + + function handleDrop(e) { + const dt = e.dataTransfer; + const files = dt.files; + + if (files.length > 0) { + // Open upload modal and set files + const fileInput = document.getElementById('fileInput'); + if (fileInput) { + // Create a new FileList-like object + const fileArray = Array.from(files); + + // Set files to input (this is a bit tricky with file inputs) + const dataTransfer = new DataTransfer(); + fileArray.forEach(file => dataTransfer.items.add(file)); + fileInput.files = dataTransfer.files; + + // Update preview and show modal + updateFileListPreview(); + const modal = new bootstrap.Modal(uploadModal); + modal.show(); + } + } + } +} + +// Update file list preview when files are selected +function updateFileListPreview() { + const fileInput = document.getElementById('fileInput'); + const fileListPreview = document.getElementById('fileListPreview'); + const selectedFilesList = document.getElementById('selectedFilesList'); + + if (!fileInput.files || fileInput.files.length === 0) { + fileListPreview.style.display = 'none'; + return; + } + + const files = Array.from(fileInput.files); + const totalSize = files.reduce((sum, file) => sum + file.size, 0); + + let html = `
+ ${files.length} file(s) selected + Total: ${formatBytes(totalSize)} +
`; + + files.forEach((file, index) => { + const fileIcon = getFileIconByName(file.name); + html += `
+
+ + ${file.name} +
+ ${formatBytes(file.size)} +
`; + }); + + selectedFilesList.innerHTML = html; + fileListPreview.style.display = 'block'; +} + +// Get file icon based on file name/extension +function getFileIconByName(fileName) { + const ext = fileName.split('.').pop().toLowerCase(); + + switch (ext) { + case 'jpg': + case 'jpeg': + case 'png': + case 'gif': + case 'bmp': + case 'svg': + return 'fa-image'; + case 'mp4': + case 'avi': + case 'mov': + case 'wmv': + case 'flv': + return 'fa-video'; + case 'mp3': + case 'wav': + case 'flac': + case 'aac': + return 'fa-music'; + case 'pdf': + return 'fa-file-pdf'; + case 'doc': + case 'docx': + return 'fa-file-word'; + case 'xls': + case 'xlsx': + return 'fa-file-excel'; + case 'ppt': + case 'pptx': + return 'fa-file-powerpoint'; + case 'txt': + case 'md': + return 'fa-file-text'; + case 'zip': + case 'rar': + case '7z': + case 'tar': + case 'gz': + return 'fa-file-archive'; + case 'js': + case 'ts': + case 'html': + case 'css': + case 'json': + case 'xml': + return 'fa-file-code'; + default: + return 'fa-file'; + } +} + + \ No newline at end of file diff --git a/weed/admin/view/app/admin.templ b/weed/admin/view/app/admin.templ new file mode 100644 index 000000000..ceb11b0f2 --- /dev/null +++ b/weed/admin/view/app/admin.templ @@ -0,0 +1,351 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ Admin(data dash.AdminData) { +
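The file browser script above drives a small JSON API on the admin server: /api/files/create-folder, /api/files/upload, /api/files/delete, plus /files/download for direct downloads. As a rough sketch of the create-folder contract that submitCreateFolder() sends, a standalone Go client could look like the following. The port, the response shape, and the absence of a login session are assumptions for illustration; the real handlers live in weed/admin/handlers/file_browser_handlers.go and sit behind the admin login.

    // createfolder_sketch.go: illustration only, not part of this patch.
    // Mirrors the JSON body built by submitCreateFolder() above.
    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func createFolder(baseURL, path, folderName string) error {
        payload, err := json.Marshal(map[string]string{
            "path":        path,       // directory currently shown in the browser
            "folder_name": folderName, // name entered in the create-folder modal
        })
        if err != nil {
            return err
        }
        resp, err := http.Post(baseURL+"/api/files/create-folder", "application/json", bytes.NewReader(payload))
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return fmt.Errorf("create folder failed: %s", resp.Status)
        }
        return nil
    }

    func main() {
        // Hypothetical local admin endpoint; adjust host and port for your setup,
        // and note that the real UI performs these calls from an authenticated session.
        if err := createFolder("http://localhost:23646", "/buckets/demo", "photos"); err != nil {
            fmt.Println(err)
        }
    }
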
+

+ Dashboard +

+ +
+ +
+ +
+
+
+
+
+
+
+ Cluster Status +
+
+ + {data.ClusterStatus} + +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Volumes +
+
+ {fmt.Sprintf("%d", data.TotalVolumes)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Files +
+
+ {formatNumber(data.TotalFiles)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Size +
+
+ {formatBytes(data.TotalSize)} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+
+
+ Master Nodes +
+
+
+
+ + + + + + + + + + for _, master := range data.MasterNodes { + + + + + + } + +
AddressRoleStatus
{master.Address} + if master.IsLeader { + Leader + } else { + Follower + } + + + {master.Status} + +
+
+
+
+
+ + +
+
+
+
+ System Health +
+
+
+
+

+ {data.SystemHealth} +

+
+
+
+
+
+
{fmt.Sprintf("%d", len(data.MasterNodes))}
+ Masters +
+
+
+
+
+
+
{fmt.Sprintf("%d", len(data.VolumeServers))}
+ Volume Servers +
+
+
+
+
+
+
{fmt.Sprintf("%d", len(data.FilerNodes))}
+ Filers +
+
+
+
+
+
+
+
+ + +
+
+
+
+
+ Volume Servers +
+ +
+
+
+ + + + + + + + + + + + + + for _, vs := range data.VolumeServers { + + + + + + + + + + } + if len(data.VolumeServers) == 0 { + + + + } + +
IDAddressData CenterRackVolumesCapacityStatus
{vs.ID} + + {vs.Address} + + + {vs.DataCenter}{vs.Rack} +
+
+ {fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes)} +
+
+
{formatBytes(vs.DiskUsage)} / {formatBytes(vs.DiskCapacity)} + + {vs.Status} + +
+ + No volume servers found +
+
+
+
+
+
+ + +
+
+
+
+
+ Filer Nodes +
+ +
+
+
+ + + + + + + + + + + + for _, filer := range data.FilerNodes { + + + + + + + + } + if len(data.FilerNodes) == 0 { + + + + } + +
AddressData CenterRackStatusLast Updated
+ + {filer.Address} + + + {filer.DataCenter}{filer.Rack} + + {filer.Status} + + {filer.LastUpdated.Format("2006-01-02 15:04:05")}
+ + No filer nodes found +
+
+
+
+
+
+ + +
+
+ + + Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} + +
+
+
+} \ No newline at end of file diff --git a/weed/admin/view/app/admin_templ.go b/weed/admin/view/app/admin_templ.go new file mode 100644 index 000000000..8e83bc90e --- /dev/null +++ b/weed/admin/view/app/admin_templ.go @@ -0,0 +1,555 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func Admin(data dash.AdminData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "
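This admin_templ.go is generated from admin.templ by the templ compiler (note the "Code generated by templ - DO NOT EDIT" header) and is regenerated rather than edited by hand. For orientation, a generated component is consumed like any other templ.Component: build it with its data and render it into an io.Writer. The handler below is a hypothetical sketch and not the actual wiring in weed/admin/handlers; the dash.AdminData value is left zero because the real fields are filled in from cluster state by the admin server.

    // dashboard_render_sketch.go: how a templ component such as app.Admin is used.
    package main

    import (
        "log"
        "net/http"

        "github.com/seaweedfs/seaweedfs/weed/admin/dash"
        "github.com/seaweedfs/seaweedfs/weed/admin/view/app"
    )

    func dashboard(w http.ResponseWriter, r *http.Request) {
        var data dash.AdminData // populated from master/filer state in the real admin server
        w.Header().Set("Content-Type", "text/html; charset=utf-8")
        if err := app.Admin(data).Render(r.Context(), w); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }
    }

    func main() {
        http.HandleFunc("/admin", dashboard)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
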

Dashboard

Cluster Status
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(data.ClusterStatus))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var2...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.ClusterStatus) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 36, Col: 59} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Total Volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 57, Col: 73} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Total Files
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(formatNumber(data.TotalFiles)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 77, Col: 66} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Total Size
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 97, Col: 64} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Master Nodes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, master := range data.MasterNodes { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
AddressRoleStatus
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 131, Col: 63} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if master.IsLeader { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "Leader") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "Follower") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(master.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var9...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var11 string + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(master.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 141, Col: 66} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
System Health
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var12 = []any{fmt.Sprintf("text-%s", getHealthColor(data.SystemHealth))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var12...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var14 string + templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.SystemHealth) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 164, Col: 50} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var15 string + templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.MasterNodes))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 171, Col: 85} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
Masters
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var16 string + templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.VolumeServers))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 179, Col: 87} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
Volume Servers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var17 string + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", len(data.FilerNodes))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 187, Col: 84} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
Filers
Volume Servers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, vs := range data.VolumeServers { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + if len(data.VolumeServers) == 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
IDAddressData CenterRackVolumesCapacityStatus
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var18 string + templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(vs.ID) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 234, Col: 54} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var20 string + templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Address) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 237, Col: 63} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var21 string + templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(vs.DataCenter) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 241, Col: 62} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var22 string + templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Rack) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 242, Col: 56} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var24 string + templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", vs.Volumes, vs.MaxVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 247, Col: 104} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var25 string + templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskUsage)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 251, Col: 74} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, " / ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var26 string + templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(vs.DiskCapacity)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 251, Col: 107} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var27 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(vs.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var27...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var29 string + templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(vs.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 254, Col: 62} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "
No volume servers found
Filer Nodes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, filer := range data.FilerNodes { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + if len(data.FilerNodes) == 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "
AddressData CenterRackStatusLast Updated
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var31 string + templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 311, Col: 66} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var32 string + templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 315, Col: 65} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var33 string + templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 316, Col: 59} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var34 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(filer.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var34...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var36 string + templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 319, Col: 65} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var37 string + templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(filer.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 322, Col: 96} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "
No filer nodes found
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var38 string + templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/admin.templ`, Line: 346, Col: 81} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_collections.templ b/weed/admin/view/app/cluster_collections.templ new file mode 100644 index 000000000..1eecb7b7d --- /dev/null +++ b/weed/admin/view/app/cluster_collections.templ @@ -0,0 +1,360 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ ClusterCollections(data dash.ClusterCollectionsData) { +
+

+ Cluster Collections +

+
+
+ + +
+
+
+ +
+ +
+
+
+
+
+
+
+ Total Collections +
+
+ {fmt.Sprintf("%d", data.TotalCollections)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Collections +
+
+ {fmt.Sprintf("%d", countActiveCollections(data.Collections))} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Volumes +
+
+ {fmt.Sprintf("%d", data.TotalVolumes)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Files +
+
+ {fmt.Sprintf("%d", data.TotalFiles)} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+
+
+
+
+ Total Storage Size +
+
+ {formatBytes(data.TotalSize)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Data Centers +
+
+ {fmt.Sprintf("%d", countUniqueCollectionDataCenters(data.Collections))} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+ Collection Details +
+
+
+ if len(data.Collections) > 0 { +
+ + + + + + + + + + + + + + + + + for _, collection := range data.Collections { + + + + + + + + + + + + + } + +
Collection NameData CenterReplicationVolumesFilesSizeTTLDisk TypeStatusActions
+ {collection.Name} + + {collection.DataCenter} + + {collection.Replication} + +
+ + {fmt.Sprintf("%d", collection.VolumeCount)} +
+
+
+ + {fmt.Sprintf("%d", collection.FileCount)} +
+
+
+ + {formatBytes(collection.TotalSize)} +
+
+ if collection.TTL != "" { + {collection.TTL} + } else { + None + } + + {collection.DiskType} + + + {collection.Status} + + +
+ + + +
+
+
+ } else { +
+ +
No Collections Found
+

No collections are currently configured in the cluster.

+ +
+ } +
+
+ + +
+
+ + + Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} + +
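The summary cards at the top of this collections page rely on two small counting helpers, countActiveCollections and countUniqueCollectionDataCenters, defined just below the template. Since they are plain Go, they are easy to unit-test; the sketch below is not part of the patch and assumes only the Status and DataCenter fields that the template itself references on dash.CollectionInfo.

    // cluster_collections_helpers_test.go: illustrative test sketch, same package as the helpers.
    package app

    import (
        "testing"

        "github.com/seaweedfs/seaweedfs/weed/admin/dash"
    )

    func TestCollectionCounters(t *testing.T) {
        collections := []dash.CollectionInfo{
            {Status: "active", DataCenter: "dc1"},
            {Status: "active", DataCenter: "dc2"},
            {Status: "inactive", DataCenter: "dc1"},
        }
        if got := countActiveCollections(collections); got != 2 {
            t.Errorf("countActiveCollections = %d, want 2", got)
        }
        if got := countUniqueCollectionDataCenters(collections); got != 2 {
            t.Errorf("countUniqueCollectionDataCenters = %d, want 2", got)
        }
    }
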
+
+
+ + + + + + +} + +func countActiveCollections(collections []dash.CollectionInfo) int { + count := 0 + for _, collection := range collections { + if collection.Status == "active" { + count++ + } + } + return count +} + +func countUniqueCollectionDataCenters(collections []dash.CollectionInfo) int { + dcMap := make(map[string]bool) + for _, collection := range collections { + dcMap[collection.DataCenter] = true + } + return len(dcMap) +} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_collections_templ.go b/weed/admin/view/app/cluster_collections_templ.go new file mode 100644 index 000000000..f68a3db77 --- /dev/null +++ b/weed/admin/view/app/cluster_collections_templ.go @@ -0,0 +1,346 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func ClusterCollections(data dash.ClusterCollectionsData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Cluster Collections

Total Collections
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalCollections)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 37, Col: 77} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Active Collections
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveCollections(data.Collections))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 57, Col: 96} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Total Volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 77, Col: 73} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Total Files
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFiles)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 97, Col: 71} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Total Storage Size
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 120, Col: 64} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Data Centers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countUniqueCollectionDataCenters(data.Collections))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 140, Col: 106} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Collection Details
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(data.Collections) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, collection := range data.Collections { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
Collection NameData CenterReplicationVolumesFilesSizeTTLDisk TypeStatusActions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Name) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 181, Col: 68} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(collection.DataCenter) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 184, Col: 105} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Replication) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 187, Col: 95} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var11 string + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.VolumeCount)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 192, Col: 90} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var12 string + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", collection.FileCount)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 198, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(collection.TotalSize)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 204, Col: 82} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if collection.TTL != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var14 string + templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(collection.TTL) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 209, Col: 104} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "None") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var15 string + templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(collection.DiskType) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 215, Col: 97} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var16 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(collection.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var16...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var18 string + templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(collection.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 219, Col: 66} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "
No Collections Found

No collections are currently configured in the cluster.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var20 string + templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_collections.templ`, Line: 263, Col: 81} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "
Create New Collection
Enter a unique name for the collection
Optional: Specify how long files should be kept
Delete Collection

Are you sure you want to delete the collection ?

This action cannot be undone. All volumes in this collection will be affected.
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func countActiveCollections(collections []dash.CollectionInfo) int { + count := 0 + for _, collection := range collections { + if collection.Status == "active" { + count++ + } + } + return count +} + +func countUniqueCollectionDataCenters(collections []dash.CollectionInfo) int { + dcMap := make(map[string]bool) + for _, collection := range collections { + dcMap[collection.DataCenter] = true + } + return len(dcMap) +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_filers.templ b/weed/admin/view/app/cluster_filers.templ new file mode 100644 index 000000000..58efe6f93 --- /dev/null +++ b/weed/admin/view/app/cluster_filers.templ @@ -0,0 +1,163 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ ClusterFilers(data dash.ClusterFilersData) { +
+

+ Filers +

+
+
+ +
+
+
+ +
+ +
+
+
+
+
+
+
+ Total Filers +
+
+ { fmt.Sprintf("%d", data.TotalFilers) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Filers +
+
+ { fmt.Sprintf("%d", countActiveFilers(data.Filers)) } +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+ Filers +
+
+
+ if len(data.Filers) > 0 { +
+ + + + + + + + + + + + + + for _, filer := range data.Filers { + + + + + + + + + + } + +
AddressVersionData CenterRackCreated AtStatusActions
+ + { filer.Address } + + + + { filer.Version } + + { filer.DataCenter } + + { filer.Rack } + + if !filer.CreatedAt.IsZero() { + { filer.CreatedAt.Format("2006-01-02 15:04:05") } + } else { + N/A + } + + + { filer.Status } + + +
+ + +
+
+
+ } else { +
+ +
No Filers Found
+

No filer servers are currently available in the cluster.

+
+ } +
+
+ + +
+
+ + + Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } + +
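The filers table above and the countActiveFilers helper below reference a handful of fields on dash.FilerInfo. The struct below is inferred purely from that usage and is shown for readability only; the authoritative definition lives in weed/admin/dash and may have more fields or different concrete types.

    // FilerInfo as inferred from cluster_filers.templ; illustration, not the real definition.
    package dashsketch

    import "time"

    type FilerInfo struct {
        Address    string    // host:port, also used to open the filer UI from the Actions column
        Version    string    // filer build version
        DataCenter string
        Rack       string
        CreatedAt  time.Time // zero value is rendered as "N/A"
        Status     string    // e.g. "active"; drives the badge color via getStatusColor
    }
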
+
+
+} + +func countActiveFilers(filers []dash.FilerInfo) int { + count := 0 + for _, filer := range filers { + if filer.Status == "active" { + count++ + } + } + return count +} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_filers_templ.go b/weed/admin/view/app/cluster_filers_templ.go new file mode 100644 index 000000000..c19a94ebc --- /dev/null +++ b/weed/admin/view/app/cluster_filers_templ.go @@ -0,0 +1,252 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func ClusterFilers(data dash.ClusterFilersData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Filers

Total Filers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalFilers)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 34, Col: 46} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Active Filers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveFilers(data.Filers))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 54, Col: 60} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Filers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(data.Filers) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, filer := range data.Filers { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
AddressVersionData CenterRackCreated AtStatusActions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Address) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 93, Col: 27} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Version) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 98, Col: 65} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(filer.DataCenter) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 101, Col: 68} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Rack) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 104, Col: 62} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if !filer.CreatedAt.IsZero() { + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(filer.CreatedAt.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 108, Col: 59} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "N/A") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(filer.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var10...) 
+ if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var12 string + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(filer.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 115, Col: 26} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templ.RenderScriptItems(ctx, templ_7745c5c3_Buffer, templ.ComponentScript{Call: fmt.Sprintf("window.open('http://%s', '_blank')", filer.Address)}) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
No Filers Found

No filer servers are currently available in the cluster.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var14 string + templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_filers.templ`, Line: 148, Col: 67} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func countActiveFilers(filers []dash.FilerInfo) int { + count := 0 + for _, filer := range filers { + if filer.Status == "active" { + count++ + } + } + return count +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_masters.templ b/weed/admin/view/app/cluster_masters.templ new file mode 100644 index 000000000..c31c635ab --- /dev/null +++ b/weed/admin/view/app/cluster_masters.templ @@ -0,0 +1,209 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ ClusterMasters(data dash.ClusterMastersData) { +
+

+ Masters +

+
+
+ +
+
+
+ +
+ +
+
+
+
+
+
+
+ Total Masters +
+
+ { fmt.Sprintf("%d", data.TotalMasters) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Masters +
+
+ { fmt.Sprintf("%d", countActiveMasters(data.Masters)) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Leaders +
+
+ { fmt.Sprintf("%d", data.LeaderCount) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Cluster Health +
+
+ if data.LeaderCount > 0 { + Healthy + } else { + Warning + } +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+ Masters +
+
+
+ if len(data.Masters) > 0 { +
+ + + + + + + + + + + + for _, master := range data.Masters { + + + + + + + + } + +
Address Role Suffrage Status Actions
+ + { master.Address } + + + + if master.IsLeader { + + Leader + + } else { + + Follower + + } + + if master.Suffrage != "" { + + { master.Suffrage } + + } else { + - + } + + + { master.Status } + + +
+ + +
+
+
+ } else { +
+ +
No Masters Found
+

No master servers are currently available in the cluster.

+
+ } +
+
+ + +
+
+ + + Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } + +
+
+
+} + +func countActiveMasters(masters []dash.MasterInfo) int { + count := 0 + for _, master := range masters { + if master.Status == "active" { + count++ + } + } + return count +} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_masters_templ.go b/weed/admin/view/app/cluster_masters_templ.go new file mode 100644 index 000000000..636d831c3 --- /dev/null +++ b/weed/admin/view/app/cluster_masters_templ.go @@ -0,0 +1,247 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func ClusterMasters(data dash.ClusterMastersData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Masters

Total Masters
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalMasters)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 34, Col: 47} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Active Masters
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveMasters(data.Masters))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 54, Col: 62} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Leaders
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.LeaderCount)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 74, Col: 46} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Cluster Health
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.LeaderCount > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "Healthy") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "Warning") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "
Masters
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(data.Masters) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, master := range data.Masters { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
Address Role Suffrage Status Actions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(master.Address) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 135, Col: 28} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if master.IsLeader { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "Leader") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "Follower") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if master.Suffrage != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(master.Suffrage) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 153, Col: 30} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "-") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(master.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var8...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(master.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 161, Col: 27} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
No Masters Found

No master servers are currently available in the cluster.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var11 string + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_masters.templ`, Line: 194, Col: 67} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func countActiveMasters(masters []dash.MasterInfo) int { + count := 0 + for _, master := range masters { + if master.Status == "active" { + count++ + } + } + return count +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_volume_servers.templ b/weed/admin/view/app/cluster_volume_servers.templ new file mode 100644 index 000000000..3a54d01b9 --- /dev/null +++ b/weed/admin/view/app/cluster_volume_servers.templ @@ -0,0 +1,221 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ ClusterVolumeServers(data dash.ClusterVolumeServersData) { +
+

+ Volume Servers +

+
+
+ +
+
+
+ +
+ +
+
+
+
+
+
+
+ Total Volume Servers +
+
+ {fmt.Sprintf("%d", data.TotalVolumeServers)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Volume Servers +
+
+ {fmt.Sprintf("%d", countActiveVolumeServers(data.VolumeServers))} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Volumes +
+
+ {fmt.Sprintf("%d", data.TotalVolumes)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Capacity +
+
+ {formatBytes(data.TotalCapacity)} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+ Volume Servers +
+
+
+ if len(data.VolumeServers) > 0 { +
+ + + + + + + + + + + + + + + + for _, host := range data.VolumeServers { + + + + + + + + + + + + } + +
Server ID Address Data Center Rack Volumes Capacity Usage Status Actions
+ {host.ID} + + + {host.Address} + + + + {host.DataCenter} + + {host.Rack} + +
+
+
+
+
+ {fmt.Sprintf("%d/%d", host.Volumes, host.MaxVolumes)} +
+
{formatBytes(host.DiskCapacity)} +
+
+
+
+
+ {formatBytes(host.DiskUsage)} +
+
+ + {host.Status} + + +
+ + +
+
+
+ } else { +
+ +
No Volume Servers Found
+

No volume servers are currently available in the cluster.

+
+ } +
+
+ + +
+
+ + + Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} + +
+
+
+} + +func countActiveVolumeServers(volumeServers []dash.VolumeServer) int { + count := 0 + for _, server := range volumeServers { + if server.Status == "active" { + count++ + } + } + return count +} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_volume_servers_templ.go b/weed/admin/view/app/cluster_volume_servers_templ.go new file mode 100644 index 000000000..6c3546362 --- /dev/null +++ b/weed/admin/view/app/cluster_volume_servers_templ.go @@ -0,0 +1,306 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func ClusterVolumeServers(data dash.ClusterVolumeServersData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Volume Servers

Total Volume Servers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumeServers)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 34, Col: 79} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Active Volume Servers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveVolumeServers(data.VolumeServers))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 54, Col: 100} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Total Volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 74, Col: 73} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Total Capacity
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalCapacity)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 94, Col: 68} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
Volume Servers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(data.VolumeServers) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, host := range data.VolumeServers { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
Server ID Address Data Center Rack Volumes Capacity Usage Status Actions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(host.ID) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 134, Col: 58} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(host.Address) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 138, Col: 61} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(host.DataCenter) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 143, Col: 99} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(host.Rack) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 146, Col: 93} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var12 string + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d/%d", host.Volumes, host.MaxVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 155, Col: 107} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskCapacity)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 158, Col: 75} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var15 string + templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(host.DiskUsage)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 166, Col: 83} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var16 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(host.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var16...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var18 string + templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(host.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 171, Col: 60} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "
No Volume Servers Found

No volume servers are currently available in the cluster.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var19 string + templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volume_servers.templ`, Line: 206, Col: 81} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func countActiveVolumeServers(volumeServers []dash.VolumeServer) int { + count := 0 + for _, server := range volumeServers { + if server.Status == "active" { + count++ + } + } + return count +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/cluster_volumes.templ b/weed/admin/view/app/cluster_volumes.templ new file mode 100644 index 000000000..376ca50b1 --- /dev/null +++ b/weed/admin/view/app/cluster_volumes.templ @@ -0,0 +1,414 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ ClusterVolumes(data dash.ClusterVolumesData) { +
+

+ Cluster Volumes +

+
+
+ + +
+
+
+ +
+ +
+
+
+
+
+
+
+ Total Volumes +
+
+ {fmt.Sprintf("%d", data.TotalVolumes)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Volumes +
+
+ {fmt.Sprintf("%d", countActiveVolumes(data.Volumes))} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Data Centers +
+
+ {fmt.Sprintf("%d", countUniqueDataCenters(data.Volumes))} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Size +
+
+ {formatBytes(data.TotalSize)} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+ Volume Details +
+
+
+ if len(data.Volumes) > 0 { + + + +
+
+ + Showing {fmt.Sprintf("%d", (data.CurrentPage-1)*data.PageSize + 1)} to {fmt.Sprintf("%d", minInt(data.CurrentPage*data.PageSize, data.TotalVolumes))} of {fmt.Sprintf("%d", data.TotalVolumes)} volumes + +
+ if data.TotalPages > 1 { +
+ + Page {fmt.Sprintf("%d", data.CurrentPage)} of {fmt.Sprintf("%d", data.TotalPages)} + +
+ } +
+ + + if data.TotalPages > 1 { +
+ +
+ } + } else { +
+ +
No Volumes Found
+

No volumes are currently available in the cluster.

+
+ } +
+
+ + +
+
+ + + Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} + +
+
+
+ + + +} + +func countActiveVolumes(volumes []dash.VolumeInfo) int { + count := 0 + for _, volume := range volumes { + if volume.Status == "active" { + count++ + } + } + return count +} + +func countUniqueDataCenters(volumes []dash.VolumeInfo) int { + dcMap := make(map[string]bool) + for _, volume := range volumes { + dcMap[volume.DataCenter] = true + } + return len(dcMap) +} + +templ getSortIcon(column, currentSort, currentOrder string) { + if column != currentSort { + + } else if currentOrder == "asc" { + + } else { + + } +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} \ No newline at end of file diff --git a/weed/admin/view/app/cluster_volumes_templ.go b/weed/admin/view/app/cluster_volumes_templ.go new file mode 100644 index 000000000..9bee1b8b5 --- /dev/null +++ b/weed/admin/view/app/cluster_volumes_templ.go @@ -0,0 +1,661 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func ClusterVolumes(data dash.ClusterVolumesData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Cluster Volumes

Total Volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 40, Col: 73} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
Active Volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveVolumes(data.Volumes))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 60, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "
Data Centers
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countUniqueDataCenters(data.Volumes))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 80, Col: 92} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "
Total Size
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 100, Col: 64} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
Volume Details
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(data.Volumes) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, volume := range data.Volumes { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "
Volume ID") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("id", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "Server") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("server", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "Data Center") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("datacenter", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "Rack") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("rack", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "Collection") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("collection", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "Size") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("size", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "File Count") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("filecount", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "Replication") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("replication", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "Status") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = getSortIcon("status", data.SortBy, data.SortOrder).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "Actions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.ID)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 186, Col: 79} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Server) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 190, Col: 62} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(volume.DataCenter) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 195, Col: 101} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Rack) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 198, Col: 95} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var11 string + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Collection) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 201, Col: 95} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var12 string + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(volume.Size)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 203, Col: 69} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var 
templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", volume.FileCount)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 204, Col: 80} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var14 string + templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Replication) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 206, Col: 91} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var15 = []any{fmt.Sprintf("badge bg-%s", getStatusColor(volume.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var15...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var17 string + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(volume.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 210, Col: 62} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "
Showing ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var18 string + templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", (data.CurrentPage-1)*data.PageSize+1)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 239, Col: 98} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, " to ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var19 string + templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", minInt(data.CurrentPage*data.PageSize, data.TotalVolumes))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 239, Col: 180} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, " of ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var20 string + templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalVolumes)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 239, Col: 222} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, " volumes
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.TotalPages > 1 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "
Page ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var21 string + templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.CurrentPage)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 245, Col: 77} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, " of ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var22 string + templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalPages)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 245, Col: 117} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.TotalPages > 1 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "
No Volumes Found

No volumes are currently available in the cluster.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var28 string + templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/cluster_volumes.templ`, Line: 317, Col: 81} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func countActiveVolumes(volumes []dash.VolumeInfo) int { + count := 0 + for _, volume := range volumes { + if volume.Status == "active" { + count++ + } + } + return count +} + +func countUniqueDataCenters(volumes []dash.VolumeInfo) int { + dcMap := make(map[string]bool) + for _, volume := range volumes { + dcMap[volume.DataCenter] = true + } + return len(dcMap) +} + +func getSortIcon(column, currentSort, currentOrder string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var29 := templ.GetChildren(ctx) + if templ_7745c5c3_Var29 == nil { + templ_7745c5c3_Var29 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + if column != currentSort { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if currentOrder == "asc" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + return nil + }) +} + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/file_browser.templ b/weed/admin/view/app/file_browser.templ new file mode 100644 index 000000000..ee35468ce --- /dev/null +++ b/weed/admin/view/app/file_browser.templ @@ -0,0 +1,438 @@ +package app + +import ( + "fmt" + "path/filepath" + "strings" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ FileBrowser(data dash.FileBrowserData) { +
+

+ if data.IsBucketPath && data.BucketName != "" { + S3 Bucket: {data.BucketName} + } else { + File Browser + } +

+
+
+ if data.IsBucketPath && data.BucketName != "" { + + Back to Buckets + + } + + + + +
+
+
+ + + + + +
+
+
+
+
+
+
+ Total Entries +
+
+ { fmt.Sprintf("%d", data.TotalEntries) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Directories +
+
+ { fmt.Sprintf("%d", countDirectories(data.Entries)) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Files +
+
+ { fmt.Sprintf("%d", countFiles(data.Entries)) } +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Size +
+
+ { formatBytes(data.TotalSize) } +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+ + if data.CurrentPath == "/" { + Root Directory + } else if data.CurrentPath == "/buckets" { + S3 Buckets Directory + + Manage Buckets + + } else { + { filepath.Base(data.CurrentPath) } + } +
+ if data.ParentPath != data.CurrentPath { + + Up + + } +
+
+ if len(data.Entries) > 0 { +
+ + + + + + + + + + + + + + for _, entry := range data.Entries { + + + + + + + + + + } + +
+ + NameSizeTypeModifiedPermissionsActions
+ + +
+ if entry.IsDirectory { + + + { entry.Name } + + } else { + + { entry.Name } + } +
+
+ if entry.IsDirectory { + + } else { + { formatBytes(entry.Size) } + } + + + if entry.IsDirectory { + Directory + } else { + { getMimeDisplayName(entry.Mime) } + } + + + if !entry.ModTime.IsZero() { + { entry.ModTime.Format("2006-01-02 15:04") } + } else { + + } + + { entry.Mode } + +
+ if !entry.IsDirectory { + + + } + + +
+
+
+ } else { +
+ +
Empty Directory
+

This directory contains no files or subdirectories.

+
+ } +
+
+ + +
+
+ + + Last updated: { data.LastUpdated.Format("2006-01-02 15:04:05") } + +
+
+ + + + + + +} + +func countDirectories(entries []dash.FileEntry) int { + count := 0 + for _, entry := range entries { + if entry.IsDirectory { + count++ + } + } + return count +} + +func countFiles(entries []dash.FileEntry) int { + count := 0 + for _, entry := range entries { + if !entry.IsDirectory { + count++ + } + } + return count +} + +func getFileIcon(mime string) string { + switch { + case strings.HasPrefix(mime, "image/"): + return "fa-image" + case strings.HasPrefix(mime, "video/"): + return "fa-video" + case strings.HasPrefix(mime, "audio/"): + return "fa-music" + case strings.HasPrefix(mime, "text/"): + return "fa-file-text" + case mime == "application/pdf": + return "fa-file-pdf" + case mime == "application/zip" || strings.Contains(mime, "archive"): + return "fa-file-archive" + case mime == "application/json": + return "fa-file-code" + case strings.Contains(mime, "script") || strings.Contains(mime, "javascript"): + return "fa-file-code" + default: + return "fa-file" + } +} + +func getMimeDisplayName(mime string) string { + switch mime { + case "text/plain": + return "Text" + case "text/html": + return "HTML" + case "application/json": + return "JSON" + case "application/pdf": + return "PDF" + case "image/jpeg": + return "JPEG" + case "image/png": + return "PNG" + case "image/gif": + return "GIF" + case "video/mp4": + return "MP4" + case "audio/mpeg": + return "MP3" + case "application/zip": + return "ZIP" + default: + if strings.HasPrefix(mime, "image/") { + return "Image" + } else if strings.HasPrefix(mime, "video/") { + return "Video" + } else if strings.HasPrefix(mime, "audio/") { + return "Audio" + } else if strings.HasPrefix(mime, "text/") { + return "Text" + } + return "File" + } +} \ No newline at end of file diff --git a/weed/admin/view/app/file_browser_templ.go b/weed/admin/view/app/file_browser_templ.go new file mode 100644 index 000000000..003f43249 --- /dev/null +++ b/weed/admin/view/app/file_browser_templ.go @@ -0,0 +1,607 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" + "path/filepath" + "strings" +) + +func FileBrowser(data dash.FileBrowserData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.IsBucketPath && data.BucketName != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "S3 Bucket: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(data.BucketName) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 14, Col: 63} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "File Browser") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.IsBucketPath && data.BucketName != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "Back to Buckets ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
Total Entries
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalEntries)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 77, Col: 46} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
Directories
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countDirectories(data.Entries))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 97, Col: 59} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
Files
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countFiles(data.Entries))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 117, Col: 53} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "
Total Size
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 137, Col: 37} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.CurrentPath == "/" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "Root Directory") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else if data.CurrentPath == "/buckets" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "S3 Buckets Directory Manage Buckets") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(filepath.Base(data.CurrentPath)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 162, Col: 37} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if data.ParentPath != data.CurrentPath { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "Up") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if len(data.Entries) > 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, entry := range data.Entries { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "
Name Size Type Modified Permissions Actions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if entry.IsDirectory { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var14 string + templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Name) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 199, Col: 25} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + var templ_7745c5c3_Var15 = []any{fmt.Sprintf("fas %s text-muted me-2", getFileIcon(entry.Mime))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var15...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var17 string + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Name) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 203, Col: 30} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if entry.IsDirectory { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + var templ_7745c5c3_Var18 string + templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(entry.Size)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 211, Col: 36} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if entry.IsDirectory { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "Directory") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + var templ_7745c5c3_Var19 string + templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(getMimeDisplayName(entry.Mime)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 219, Col: 44} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if !entry.ModTime.IsZero() { + var templ_7745c5c3_Var20 string + templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(entry.ModTime.Format("2006-01-02 15:04")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 225, Col: 53} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var21 string + templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Mode) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 231, Col: 42} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if !entry.IsDirectory { + templ_7745c5c3_Err = templ.RenderScriptItems(ctx, templ_7745c5c3_Buffer, templ.ComponentScript{Call: fmt.Sprintf("downloadFile('%s')", entry.FullPath)}) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templ.RenderScriptItems(ctx, templ_7745c5c3_Buffer, templ.ComponentScript{Call: fmt.Sprintf("viewFile('%s')", entry.FullPath)}) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templ.RenderScriptItems(ctx, templ_7745c5c3_Buffer, templ.ComponentScript{Call: fmt.Sprintf("showProperties('%s')", entry.FullPath)}) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, " ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templ.RenderScriptItems(ctx, templ_7745c5c3_Buffer, templ.ComponentScript{Call: fmt.Sprintf("confirmDelete('%s')", entry.FullPath)}) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } else { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "
Empty Directory

This directory contains no files or subdirectories.

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "
Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var26 string + templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/file_browser.templ`, Line: 271, Col: 66} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "
Create New Folder
Folder names cannot contain / or \\ characters.
Upload Files
Choose one or more files to upload to the current directory. You can select multiple files by holding Ctrl (Cmd on Mac) while clicking.
0%
Preparing upload...
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func countDirectories(entries []dash.FileEntry) int { + count := 0 + for _, entry := range entries { + if entry.IsDirectory { + count++ + } + } + return count +} + +func countFiles(entries []dash.FileEntry) int { + count := 0 + for _, entry := range entries { + if !entry.IsDirectory { + count++ + } + } + return count +} + +func getFileIcon(mime string) string { + switch { + case strings.HasPrefix(mime, "image/"): + return "fa-image" + case strings.HasPrefix(mime, "video/"): + return "fa-video" + case strings.HasPrefix(mime, "audio/"): + return "fa-music" + case strings.HasPrefix(mime, "text/"): + return "fa-file-text" + case mime == "application/pdf": + return "fa-file-pdf" + case mime == "application/zip" || strings.Contains(mime, "archive"): + return "fa-file-archive" + case mime == "application/json": + return "fa-file-code" + case strings.Contains(mime, "script") || strings.Contains(mime, "javascript"): + return "fa-file-code" + default: + return "fa-file" + } +} + +func getMimeDisplayName(mime string) string { + switch mime { + case "text/plain": + return "Text" + case "text/html": + return "HTML" + case "application/json": + return "JSON" + case "application/pdf": + return "PDF" + case "image/jpeg": + return "JPEG" + case "image/png": + return "PNG" + case "image/gif": + return "GIF" + case "video/mp4": + return "MP4" + case "audio/mpeg": + return "MP3" + case "application/zip": + return "ZIP" + default: + if strings.HasPrefix(mime, "image/") { + return "Image" + } else if strings.HasPrefix(mime, "video/") { + return "Video" + } else if strings.HasPrefix(mime, "audio/") { + return "Audio" + } else if strings.HasPrefix(mime, "text/") { + return "Text" + } + return "File" + } +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/object_store_users.templ b/weed/admin/view/app/object_store_users.templ new file mode 100644 index 000000000..2329a0178 --- /dev/null +++ b/weed/admin/view/app/object_store_users.templ @@ -0,0 +1,214 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ ObjectStoreUsers(data dash.ObjectStoreUsersData) { +
+ +
+
+

+ Object Store Users +

+

Manage S3 API users and their access credentials

+
+
+ +
+
+ + +
+
+
+
+
+
+
+ Total Users +
+
+ {fmt.Sprintf("%d", data.TotalUsers)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Users +
+
+ {fmt.Sprintf("%d", countActiveUsers(data.Users))} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Last Updated +
+
+ {data.LastUpdated.Format("15:04")} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+
+
+ Object Store Users +
+ +
+
+
+ + + + + + + + + + + + + + for _, user := range data.Users { + + + + + + + + + + } + if len(data.Users) == 0 { + + + + } + +
UsernameEmailAccess KeyStatusCreatedLast LoginActions
+
+ + {user.Username} +
+
{user.Email} + {user.AccessKey} + + + {user.Status} + + {user.CreatedAt.Format("2006-01-02")}{user.LastLogin.Format("2006-01-02")} +
+ + +
+
+ +
+
No users found
+

Create your first object store user to get started.

+
+
+
+
+
+
+
+ + +
+
+ + + Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} + +
+
+
+} + +// Helper functions for template +func getUserStatusColor(status string) string { + switch status { + case "active": + return "success" + case "inactive": + return "warning" + case "suspended": + return "danger" + default: + return "secondary" + } +} + +func countActiveUsers(users []dash.ObjectStoreUser) int { + count := 0 + for _, user := range users { + if user.Status == "active" { + count++ + } + } + return count +} + \ No newline at end of file diff --git a/weed/admin/view/app/object_store_users_templ.go b/weed/admin/view/app/object_store_users_templ.go new file mode 100644 index 000000000..d2af6ea27 --- /dev/null +++ b/weed/admin/view/app/object_store_users_templ.go @@ -0,0 +1,237 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func ObjectStoreUsers(data dash.ObjectStoreUsersData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

Object Store Users

Manage S3 API users and their access credentials

Total Users
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalUsers)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 38, Col: 71} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Active Users
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveUsers(data.Users))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 58, Col: 84} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Last Updated
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("15:04")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 78, Col: 69} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Object Store Users
Actions:
Export List
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, user := range data.Users { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + if len(data.Users) == 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "
UsernameEmailAccess KeyStatusCreatedLast LoginActions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(user.Username) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 130, Col: 74} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(user.Email) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 133, Col: 59} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(user.AccessKey) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 135, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 = []any{fmt.Sprintf("badge bg-%s", getUserStatusColor(user.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var8...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(user.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 139, Col: 64} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var11 string + templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(user.CreatedAt.Format("2006-01-02")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 142, Col: 84} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var12 string + templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(user.LastLogin.Format("2006-01-02")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 143, Col: 84} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "
No users found

Create your first object store user to get started.

Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/object_store_users.templ`, Line: 184, Col: 81} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// Helper functions for template +func getUserStatusColor(status string) string { + switch status { + case "active": + return "success" + case "inactive": + return "warning" + case "suspended": + return "danger" + default: + return "secondary" + } +} + +func countActiveUsers(users []dash.ObjectStoreUser) int { + count := 0 + for _, user := range users { + if user.Status == "active" { + count++ + } + } + return count +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/s3_buckets.templ b/weed/admin/view/app/s3_buckets.templ new file mode 100644 index 000000000..c118eea5b --- /dev/null +++ b/weed/admin/view/app/s3_buckets.templ @@ -0,0 +1,302 @@ +package app + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +templ S3Buckets(data dash.S3BucketsData) { +
+

+ S3 Buckets +

+
+
+ + +
+
+
+ +
+ +
+
+
+
+
+
+
+ Total Buckets +
+
+ {fmt.Sprintf("%d", data.TotalBuckets)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Total Storage +
+
+ {formatBytes(data.TotalSize)} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Active Buckets +
+
+ {fmt.Sprintf("%d", countActiveBuckets(data.Buckets))} +
+
+
+ +
+
+
+
+
+ +
+
+
+
+
+
+ Last Updated +
+
+ {data.LastUpdated.Format("15:04:05")} +
+
+
+ +
+
+
+
+
+
+ + +
+
+
+
+
+ S3 Buckets +
+ +
+
+
+ + + + + + + + + + + + + for _, bucket := range data.Buckets { + + + + + + + + + } + if len(data.Buckets) == 0 { + + + + } + +
NameCreatedObjectsSizeStatusActions
+ + + {bucket.Name} + + {bucket.CreatedAt.Format("2006-01-02 15:04")}{fmt.Sprintf("%d", bucket.ObjectCount)}{formatBytes(bucket.Size)} + + {bucket.Status} + + +
+ + + + + + + +
+
+ +
+
No S3 buckets found
+

Create your first bucket to get started with S3 storage.

+ +
+
+
+
+
+
+
+ + +
+
+ + + Last updated: {data.LastUpdated.Format("2006-01-02 15:04:05")} + +
+
+
+ + + + + + +} + +// Helper functions for template +func getBucketStatusColor(status string) string { + switch status { + case "active": + return "success" + case "error": + return "danger" + case "warning": + return "warning" + default: + return "secondary" + } +} + +func countActiveBuckets(buckets []dash.S3Bucket) int { + count := 0 + for _, bucket := range buckets { + if bucket.Status == "active" { + count++ + } + } + return count +} \ No newline at end of file diff --git a/weed/admin/view/app/s3_buckets_templ.go b/weed/admin/view/app/s3_buckets_templ.go new file mode 100644 index 000000000..9038b9dbf --- /dev/null +++ b/weed/admin/view/app/s3_buckets_templ.go @@ -0,0 +1,277 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package app + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/seaweedfs/seaweedfs/weed/admin/dash" +) + +func S3Buckets(data dash.S3BucketsData) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "

S3 Buckets

Total Buckets
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var2 string + templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", data.TotalBuckets)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 37, Col: 73} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "
Total Storage
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(data.TotalSize)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 57, Col: 64} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "
Active Buckets
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var4 string + templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", countActiveBuckets(data.Buckets))) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 77, Col: 88} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "
Last Updated
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 97, Col: 72} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "
S3 Buckets
Actions:
Export List
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + for _, bucket := range data.Buckets { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + if len(data.Buckets) == 0 { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "
NameCreatedObjectsSizeStatusActions
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(bucket.Name) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 149, Col: 64} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var8 string + templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(bucket.CreatedAt.Format("2006-01-02 15:04")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 152, Col: 92} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var9 string + templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", bucket.ObjectCount)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 153, Col: 86} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var10 string + templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(formatBytes(bucket.Size)) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 154, Col: 73} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var11 = []any{fmt.Sprintf("badge bg-%s", getBucketStatusColor(bucket.Status))} + templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var11...) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var13 string + templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(bucket.Status) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 157, Col: 66} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "
No S3 buckets found

Create your first bucket to get started with S3 storage.

Last updated: ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var17 string + templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(data.LastUpdated.Format("2006-01-02 15:04:05")) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/app/s3_buckets.templ`, Line: 211, Col: 81} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "
Create New S3 Bucket
Bucket names must be between 3 and 63 characters, contain only lowercase letters, numbers, dots, and hyphens.
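The create-bucket dialog documents the naming rule shown above; the corresponding server-side check is not part of this template. A minimal validation matching just the stated rule could look like the sketch below (illustrative only, and looser than the full S3 specification, which additionally forbids leading or trailing dots and hyphens, consecutive dots, and IP-address-shaped names):

package dash

import "regexp"

// bucketNameRe mirrors only the rule stated in the dialog:
// 3-63 characters, limited to lowercase letters, digits, dots and hyphens.
var bucketNameRe = regexp.MustCompile(`^[a-z0-9.-]{3,63}$`)

// isValidBucketName is an illustrative helper, not the admin handler's actual check.
func isValidBucketName(name string) bool {
	return bucketNameRe.MatchString(name)
}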
Delete Bucket

Are you sure you want to delete the bucket ?

Warning: This action cannot be undone. All objects in the bucket will be permanently deleted.
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +// Helper functions for template +func getBucketStatusColor(status string) string { + switch status { + case "active": + return "success" + case "error": + return "danger" + case "warning": + return "warning" + default: + return "secondary" + } +} + +func countActiveBuckets(buckets []dash.S3Bucket) int { + count := 0 + for _, bucket := range buckets { + if bucket.Status == "active" { + count++ + } + } + return count +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/admin/view/app/template_helpers.go b/weed/admin/view/app/template_helpers.go new file mode 100644 index 000000000..04eebb272 --- /dev/null +++ b/weed/admin/view/app/template_helpers.go @@ -0,0 +1,84 @@ +package app + +import ( + "fmt" + "strconv" +) + +// getStatusColor returns Bootstrap color class for status +func getStatusColor(status string) string { + switch status { + case "active", "healthy": + return "success" + case "warning": + return "warning" + case "critical", "unreachable": + return "danger" + default: + return "secondary" + } +} + +// getHealthColor returns Bootstrap color class for health status +func getHealthColor(health string) string { + switch health { + case "excellent": + return "success" + case "good": + return "primary" + case "fair": + return "warning" + case "poor": + return "danger" + default: + return "secondary" + } +} + +// formatBytes converts bytes to human readable format +func formatBytes(bytes int64) string { + if bytes == 0 { + return "0 B" + } + + units := []string{"B", "KB", "MB", "GB", "TB", "PB"} + var i int + value := float64(bytes) + + for value >= 1024 && i < len(units)-1 { + value /= 1024 + i++ + } + + if i == 0 { + return fmt.Sprintf("%.0f %s", value, units[i]) + } + return fmt.Sprintf("%.1f %s", value, units[i]) +} + +// formatNumber formats large numbers with commas +func formatNumber(num int64) string { + if num == 0 { + return "0" + } + + str := strconv.FormatInt(num, 10) + result := "" + + for i, char := range str { + if i > 0 && (len(str)-i)%3 == 0 { + result += "," + } + result += string(char) + } + + return result +} + +// calculatePercent calculates percentage for progress bars +func calculatePercent(current, max int) int { + if max == 0 { + return 0 + } + return (current * 100) / max +} diff --git a/weed/admin/view/layout/layout.templ b/weed/admin/view/layout/layout.templ new file mode 100644 index 000000000..acc4f1a81 --- /dev/null +++ b/weed/admin/view/layout/layout.templ @@ -0,0 +1,263 @@ +package layout + +import ( + "fmt" + "time" + "github.com/gin-gonic/gin" +) + +templ Layout(c *gin.Context, content templ.Component) { +{{ + username := c.GetString("username") + if username == "" { + username = "admin" + } +}} + + + + + SeaweedFS Admin + + + + + + + + + + + + + +
+ + + +
+ + + + +
+
+ @content +
+
+
+
+ + +
+
+ + © {fmt.Sprintf("%d", time.Now().Year())} SeaweedFS Admin + +
+
+ + + + + + + +} + +templ LoginForm(c *gin.Context, title string, errorMessage string) { + + + + + {title} - Login + + + + + + +
+
+
+
+
+
+ +

{title}

+

Please sign in to continue

+
+ + if errorMessage != "" { + + } + +
+
+ +
+ + + + +
+
+ +
+ +
+ + + + +
+
+ + +
+
+
+
+
+
+ + + + +} \ No newline at end of file diff --git a/weed/admin/view/layout/layout_templ.go b/weed/admin/view/layout/layout_templ.go new file mode 100644 index 000000000..9a8afb241 --- /dev/null +++ b/weed/admin/view/layout/layout_templ.go @@ -0,0 +1,163 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.833 +package layout + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "fmt" + "github.com/gin-gonic/gin" + "time" +) + +func Layout(c *gin.Context, content templ.Component) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + + username := c.GetString("username") + if username == "" { + username = "admin" + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "SeaweedFS Admin
© ") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var3 string + templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", time.Now().Year())) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 186, Col: 60} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, " SeaweedFS Admin
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func LoginForm(c *gin.Context, title string, errorMessage string) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var4 := templ.GetChildren(ctx) + if templ_7745c5c3_Var4 == nil { + templ_7745c5c3_Var4 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(title) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 204, Col: 17} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, " - Login

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(title) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 218, Col: 57} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "

Please sign in to continue

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + if errorMessage != "" { + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var7 string + templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(errorMessage) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `view/layout/layout.templ`, Line: 225, Col: 45} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "
") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +var _ = templruntime.GeneratedTemplate diff --git a/weed/command/admin.go b/weed/command/admin.go new file mode 100644 index 000000000..ef1d54bb3 --- /dev/null +++ b/weed/command/admin.go @@ -0,0 +1,236 @@ +package command + +import ( + "context" + "crypto/rand" + "crypto/tls" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/gin-contrib/sessions" + "github.com/gin-contrib/sessions/cookie" + "github.com/gin-gonic/gin" + + "github.com/seaweedfs/seaweedfs/weed/admin/dash" + "github.com/seaweedfs/seaweedfs/weed/admin/handlers" +) + +var ( + a AdminOptions +) + +type AdminOptions struct { + port *int + masters *string + tlsCertPath *string + tlsKeyPath *string + adminUser *string + adminPassword *string +} + +func init() { + cmdAdmin.Run = runAdmin // break init cycle + a.port = cmdAdmin.Flag.Int("port", 23646, "admin server port") + a.masters = cmdAdmin.Flag.String("masters", "localhost:9333", "comma-separated master servers") + a.tlsCertPath = cmdAdmin.Flag.String("tlsCert", "", "path to TLS certificate file") + a.tlsKeyPath = cmdAdmin.Flag.String("tlsKey", "", "path to TLS private key file") + + a.adminUser = cmdAdmin.Flag.String("adminUser", "admin", "admin interface username") + a.adminPassword = cmdAdmin.Flag.String("adminPassword", "", "admin interface password (if empty, auth is disabled)") +} + +var cmdAdmin = &Command{ + UsageLine: "admin -port=23646 -masters=localhost:9333", + Short: "start SeaweedFS web admin interface", + Long: `Start a web admin interface for SeaweedFS cluster management. + + The admin interface provides a modern web interface for: + - Cluster topology visualization and monitoring + - Volume management and operations + - File browser and management + - System metrics and performance monitoring + - Configuration management + - Maintenance operations + + The admin interface automatically discovers filers from the master servers. 
+ + Example Usage: + weed admin -port=23646 -masters="master1:9333,master2:9333" + weed admin -port=443 -tlsCert=/etc/ssl/admin.crt -tlsKey=/etc/ssl/admin.key + + Authentication: + - If adminPassword is not set, the admin interface runs without authentication + - If adminPassword is set, users must login with adminUser/adminPassword + - Sessions are secured with auto-generated session keys + + Security: + - Use HTTPS in production by providing TLS certificates + - Set strong adminPassword for production deployments + - Configure firewall rules to restrict admin interface access + +`, +} + +func runAdmin(cmd *Command, args []string) bool { + // Validate required parameters + if *a.masters == "" { + fmt.Println("Error: masters parameter is required") + fmt.Println("Usage: weed admin -masters=master1:9333,master2:9333") + return false + } + + // Validate TLS configuration + if (*a.tlsCertPath != "" && *a.tlsKeyPath == "") || + (*a.tlsCertPath == "" && *a.tlsKeyPath != "") { + fmt.Println("Error: Both tlsCert and tlsKey must be provided for TLS") + return false + } + + // Security warnings + if *a.adminPassword == "" { + fmt.Println("WARNING: Admin interface is running without authentication!") + fmt.Println(" Set -adminPassword for production use") + } + + if *a.tlsCertPath == "" { + fmt.Println("WARNING: Admin interface is running without TLS encryption!") + fmt.Println(" Use -tlsCert and -tlsKey for production use") + } + + fmt.Printf("Starting SeaweedFS Admin Interface on port %d\n", *a.port) + fmt.Printf("Masters: %s\n", *a.masters) + fmt.Printf("Filers will be discovered automatically from masters\n") + if *a.adminPassword != "" { + fmt.Printf("Authentication: Enabled (user: %s)\n", *a.adminUser) + } else { + fmt.Printf("Authentication: Disabled\n") + } + if *a.tlsCertPath != "" { + fmt.Printf("TLS: Enabled\n") + } else { + fmt.Printf("TLS: Disabled\n") + } + + // Set up graceful shutdown + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Handle interrupt signals + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + go func() { + sig := <-sigChan + fmt.Printf("\nReceived signal %v, shutting down gracefully...\n", sig) + cancel() + }() + + // Start the admin server + err := startAdminServer(ctx, a) + if err != nil { + fmt.Printf("Admin server error: %v\n", err) + return false + } + + fmt.Println("Admin server stopped") + return true +} + +// startAdminServer starts the actual admin server +func startAdminServer(ctx context.Context, options AdminOptions) error { + // Set Gin mode + gin.SetMode(gin.ReleaseMode) + + // Create router + r := gin.New() + r.Use(gin.Logger(), gin.Recovery()) + + // Session store - always auto-generate session key + sessionKeyBytes := make([]byte, 32) + _, err := rand.Read(sessionKeyBytes) + if err != nil { + return fmt.Errorf("failed to generate session key: %v", err) + } + store := cookie.NewStore(sessionKeyBytes) + r.Use(sessions.Sessions("admin-session", store)) + + // Static files - serve from filesystem + staticPath := filepath.Join("weed", "admin", "static") + if _, err := os.Stat(staticPath); err == nil { + r.Static("/static", staticPath) + } else { + log.Printf("Warning: Static files not found at %s", staticPath) + } + + // Create admin server + adminServer := dash.NewAdminServer(*options.masters, nil) + + // Show discovered filers + filers := adminServer.GetAllFilers() + if len(filers) > 0 { + fmt.Printf("Discovered filers: %s\n", strings.Join(filers, ", ")) + } else { + 
fmt.Printf("No filers discovered from masters\n") + } + + // Create handlers and setup routes + adminHandlers := handlers.NewAdminHandlers(adminServer) + adminHandlers.SetupRoutes(r, *options.adminPassword != "", *options.adminUser, *options.adminPassword) + + // Server configuration + addr := fmt.Sprintf(":%d", *options.port) + server := &http.Server{ + Addr: addr, + Handler: r, + } + + // TLS configuration + if *options.tlsCertPath != "" && *options.tlsKeyPath != "" { + server.TLSConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + } + + // Start server + go func() { + log.Printf("Starting SeaweedFS Admin Server on port %d", *options.port) + + var err error + if *options.tlsCertPath != "" && *options.tlsKeyPath != "" { + log.Printf("Using TLS with cert: %s, key: %s", *options.tlsCertPath, *options.tlsKeyPath) + err = server.ListenAndServeTLS(*options.tlsCertPath, *options.tlsKeyPath) + } else { + err = server.ListenAndServe() + } + + if err != nil && err != http.ErrServerClosed { + log.Printf("Failed to start server: %v", err) + } + }() + + // Wait for context cancellation + <-ctx.Done() + + // Graceful shutdown + log.Println("Shutting down admin server...") + shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if err := server.Shutdown(shutdownCtx); err != nil { + return fmt.Errorf("admin server forced to shutdown: %v", err) + } + + return nil +} + +// GetAdminOptions returns the admin command options for testing +func GetAdminOptions() *AdminOptions { + return &AdminOptions{} +} diff --git a/weed/command/command.go b/weed/command/command.go index e3aff4f97..65ddce717 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -9,6 +9,7 @@ import ( ) var Commands = []*Command{ + cmdAdmin, cmdAutocomplete, cmdUnautocomplete, cmdBackup,