Browse Source

fix for mysql and mysql2

pull/5580/head
Konstantin Lebedev 8 months ago
parent
commit
94abb2cc9b
  1. 0
      docker/compose/initdb.d/seaweedfs.sql
  2. 16
      docker/compose/local-k8s-compose.yml
  3. 21
      weed/filer/abstract_sql/abstract_sql_store.go
  4. 3
      weed/filer/mysql/mysql_sql_gen.go
  5. 2
      weed/s3api/s3api_object_handlers_list.go
  6. 2
      weed/server/filer_server_handlers_write_autochunk.go

0
docker/seaweedfs.sql → docker/compose/initdb.d/seaweedfs.sql

16
docker/compose/local-k8s-compose.yml

@@ -6,7 +6,7 @@ services:
ports:
- 9333:9333
- 19333:19333
command: "master -ip=master"
command: "master -ip=master -volumeSizeLimitMB 100"
volume:
image: chrislusf/seaweedfs:local
ports:
@@ -20,7 +20,7 @@ services:
ports:
- 3306:3306
volumes:
- ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql
- ./initdb.d:/docker-entrypoint-initdb.d
environment:
- MYSQL_ROOT_PASSWORD=secret
- MYSQL_DATABASE=seaweedfs
@@ -50,18 +50,11 @@ services:
- master
- volume
- mysql
ingress:
image: jwilder/nginx-proxy:alpine
ports:
- "80:80"
volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro
- ./nginx/proxy.conf:/etc/nginx/proxy.conf
s3:
image: chrislusf/seaweedfs:local
ports:
- 8333:8333
command: '-v 9 s3 -filer="filer:8888"'
command: '-v 9 s3 -ip.bind=0.0.0.0 -filer="filer:8888" -allowEmptyFolder=false -allowDeleteBucketNotEmpty=true -allowListRecursive=true'
depends_on:
- master
- volume
@@ -76,7 +69,7 @@ services:
REGISTRY_LOG_LEVEL: "debug"
REGISTRY_STORAGE: "s3"
REGISTRY_STORAGE_S3_REGION: "us-east-1"
REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress"
REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333"
REGISTRY_STORAGE_S3_BUCKET: "registry"
REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1"
REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1"
@@ -91,4 +84,3 @@ services:
- 5001:5001
depends_on:
- s3
- ingress

21
weed/filer/abstract_sql/abstract_sql_store.go

@@ -333,25 +333,31 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
func (store *AbstractSqlStore) ListRecursivePrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, delimiter bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
bucketDir := fmt.Sprintf("/buckets/%s", bucket)
if err != nil {
return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
}
bucketDir := ""
if bucket != DEFAULT_TABLE {
bucketDir = fmt.Sprintf("/buckets/%s", bucket)
}
shortDir := string(shortPath)
namePrefix := prefix + "%"
var dirPrefix string
isPrefixEndsWithDelimiter := false
if delimiter {
if prefix == "" && len(startFileName) == 0 {
dirPrefix = shortDir
limit += 1
isPrefixEndsWithDelimiter = true
}
} else {
if shortDir == "/" {
if strings.HasSuffix(shortDir, "/") {
dirPrefix = fmt.Sprintf("%s%s%%", shortDir, prefix)
} else {
dirPrefix = fmt.Sprintf("%s/%s%%", shortDir, prefix)
}
}
rows, err := db.QueryContext(ctx, store.GetSqlListRecursive(bucket), startFileName, util.HashStringToLong(shortDir), prefix+"%", dirPrefix, limit+1)
rows, err := db.QueryContext(ctx, store.GetSqlListRecursive(bucket), startFileName, util.HashStringToLong(shortDir), namePrefix, dirPrefix, limit+1)
if err != nil {
glog.Errorf("list %s : %v", dirPath, err)
return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
@@ -365,10 +371,10 @@ func (store *AbstractSqlStore) ListRecursivePrefixedEntries(ctx context.Context,
glog.V(0).Infof("scan %s : %v", dirPath, err)
return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
}
if len(dir) != 1 {
fileName = fmt.Sprintf("%s/%s", dir, name)
} else {
if strings.HasSuffix(dir, "/") {
fileName = dir + name
} else {
fileName = fmt.Sprintf("%s/%s", dir, name)
}
lastFileName = fmt.Sprintf("%s%s", dir, name)
entry := &filer.Entry{
@@ -383,7 +389,8 @@ func (store *AbstractSqlStore) ListRecursivePrefixedEntries(ctx context.Context,
if !delimiter && isDirectory {
continue
}
if delimiter && shortDir == lastFileName && isDirectory {
glog.V(0).Infof("ListRecursivePrefixedEntries bucket %s, shortDir: %s, bucketDir: %s, lastFileName %s, fileName %s", bucket, shortDir, bucketDir, lastFileName, fileName)
if isPrefixEndsWithDelimiter && shortDir == lastFileName && isDirectory {
continue
}
if !eachEntryFunc(entry) {

3
weed/filer/mysql/mysql_sql_gen.go

@@ -50,7 +50,8 @@ func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string {
}
// GetSqlListRecursive builds the MySQL query used for recursive prefixed
// listing: rows whose concatenated path (directory + name) sorts after the
// start key and that either match the name prefix within the hashed
// directory or match the full-path prefix, ordered by full path with a
// LIMIT. MySQL's default sql_mode treats || as logical OR (not string
// concatenation) and uses a single = for equality, so this dialect must
// use CONCAT(...) and = instead of the ||/== form used elsewhere.
func (gen *SqlGenMysql) GetSqlListRecursive(tableName string) string {
	return fmt.Sprintf("SELECT `directory`, `name`, `meta` FROM `%s` WHERE CONCAT(`directory`, `name`) > ? AND ((`dirhash` = ? AND `name` like ?) OR CONCAT(`directory`, `name`) like ?) ORDER BY CONCAT(`directory`, `name`) ASC LIMIT ?", tableName)
}
func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string {

2
weed/s3api/s3api_object_handlers_list.go

@@ -324,7 +324,7 @@ type ListingCursor struct {
nextMarker string
}
func getStartFileFromKey(key string) string {
func (s3a *S3ApiServer) getStartFileFromKey(key string) string {
idx := strings.LastIndex(key, "/")
if idx == -1 {
return "/" + key

2
weed/server/filer_server_handlers_write_autochunk.go

@@ -268,7 +268,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, skipCheckParentDirEntry(r), so.MaxFileNameLength)
// In test_bucket_listv2_delimiter_basic, the valid object key is the parent folder
if dbErr != nil && strings.HasSuffix(dbErr.Error(), " is a file") && r.Header.Get(s3_constants.AmzIdentityId) != "" {
if dbErr != nil && strings.HasSuffix(dbErr.Error(), " is a file") && (r.Header.Get(s3_constants.AmzAuthType) != "" || r.Header.Get("X-Amz-Date") != "") {
dbErr = fs.filer.CreateEntry(ctx, entry, false, false, nil, true, so.MaxFileNameLength)
}
if dbErr != nil {

Loading…
Cancel
Save