package weed_server

import (
    "context"
    "crypto/md5"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "mime"
    "net/http"
    "net/url"
    "os"
    filenamePath "path"
    "strconv"
    "strings"
    "time"

    "github.com/chrislusf/seaweedfs/weed/filer2"
    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/operation"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/security"
    "github.com/chrislusf/seaweedfs/weed/stats"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
    "github.com/chrislusf/seaweedfs/weed/util"
)

var (
    OS_UID = uint32(os.Getuid())
    OS_GID = uint32(os.Getgid())
)

type FilerPostResult struct {
    Name  string `json:"name,omitempty"`
    Size  int64  `json:"size,omitempty"`
    Error string `json:"error,omitempty"`
    Fid   string `json:"fid,omitempty"`
    Url   string `json:"url,omitempty"`
}
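
// assignNewFileInfo asks the master to assign a new file id and returns the
// volume server upload location together with the JWT needed for the write.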
func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string) (fileId, urlLocation string, auth security.EncodedJwt, err error) {

    stats.FilerRequestCounter.WithLabelValues("assign").Inc()
    start := time.Now()
    defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }()

    ar := &operation.VolumeAssignRequest{
        Count:       1,
        Replication: replication,
        Collection:  collection,
        Ttl:         ttlString,
        DataCenter:  dataCenter,
    }
    var altRequest *operation.VolumeAssignRequest
    if dataCenter != "" {
        altRequest = &operation.VolumeAssignRequest{
            Count:       1,
            Replication: replication,
            Collection:  collection,
            Ttl:         ttlString,
            DataCenter:  "",
        }
    }

    assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
    if ae != nil {
        glog.Errorf("failing to assign a file id: %v", ae)
        writeJsonError(w, r, http.StatusInternalServerError, ae)
        err = ae
        return
    }
    fileId = assignResult.Fid
    urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
    auth = assignResult.Auth
    return
}
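
// PostHandler handles a file upload: it assigns a new file id, streams the content
// to a volume server, and then records the resulting metadata in the filer store.
// example: curl -F file=@/some/local/file "http://localhost:8888/path/to/dir/"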
func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {

    ctx := context.Background()

    query := r.URL.Query()
    collection, replication := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
    dataCenter := query.Get("dataCenter")
    if dataCenter == "" {
        dataCenter = fs.option.DataCenter
    }
    ttlString := r.URL.Query().Get("ttl")

    // read ttl in seconds
    ttl, err := needle.ReadTTL(ttlString)
    ttlSeconds := int32(0)
    if err == nil {
        ttlSeconds = int32(ttl.Minutes()) * 60
    }

    if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString); autoChunked {
        return
    }

    if fs.option.Cipher {
        reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString)
        if err != nil {
            writeJsonError(w, r, http.StatusInternalServerError, err)
        } else if reply != nil {
            writeJsonQuiet(w, r, http.StatusCreated, reply)
        }
        return
    }

    fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
    if err != nil || fileId == "" || urlLocation == "" {
        glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
        writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter))
        return
    }

    glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)

    u, _ := url.Parse(urlLocation)
    ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
    if err != nil {
        return
    }

    if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil {
        return
    }

    // send back post result
    reply := FilerPostResult{
        Name:  ret.Name,
        Size:  int64(ret.Size),
        Error: ret.Error,
        Fid:   fileId,
        Url:   urlLocation,
    }
    setEtag(w, ret.ETag)
    writeJsonQuiet(w, r, http.StatusCreated, reply)
}

// update metadata in filer store
func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string,
    collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) {

    stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
    start := time.Now()
    defer func() {
        stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds())
    }()

    modeStr := r.URL.Query().Get("mode")
    if modeStr == "" {
        modeStr = "0660"
    }
    mode, err := strconv.ParseUint(modeStr, 8, 32)
    if err != nil {
        glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
        mode = 0660
    }

    path := r.URL.Path
    if strings.HasSuffix(path, "/") {
        if ret.Name != "" {
            path += ret.Name
        }
    }
    existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
    crTime := time.Now()
    if err == nil && existingEntry != nil {
        crTime = existingEntry.Crtime
    }
    entry := &filer2.Entry{
        FullPath: util.FullPath(path),
        Attr: filer2.Attr{
            Mtime:       time.Now(),
            Crtime:      crTime,
            Mode:        os.FileMode(mode),
            Uid:         OS_UID,
            Gid:         OS_GID,
            Replication: replication,
            Collection:  collection,
            TtlSec:      ttlSeconds,
            Mime:        ret.Mime,
            Md5:         md5value,
        },
        Chunks: []*filer_pb.FileChunk{{
            FileId: fileId,
            Size:   uint64(ret.Size),
            Mtime:  time.Now().UnixNano(),
            ETag:   ret.ETag,
        }},
    }
    if entry.Attr.Mime == "" {
        if ext := filenamePath.Ext(path); ext != "" {
            entry.Attr.Mime = mime.TypeByExtension(ext)
        }
    }
    // glog.V(4).Infof("saving %s => %+v", path, entry)
    if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil {
        fs.filer.DeleteChunks(entry.Chunks)
        glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
        writeJsonError(w, r, http.StatusInternalServerError, dbErr)
        err = dbErr
        return
    }

    return nil
}

// send request to volume server
func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) {

    stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
    start := time.Now()
    defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()

    ret = &operation.UploadResult{}

    md5Hash := md5.New()
    body := r.Body
    if r.Method == "PUT" {
        // only PUT or large chunked files have Md5 in attributes
        body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash))
    }

    request := &http.Request{
        Method:        r.Method,
        URL:           u,
        Proto:         r.Proto,
        ProtoMajor:    r.ProtoMajor,
        ProtoMinor:    r.ProtoMinor,
        Header:        r.Header,
        Body:          body,
        Host:          r.Host,
        ContentLength: r.ContentLength,
    }

    if auth != "" {
        request.Header.Set("Authorization", "BEARER "+string(auth))
    }

    resp, doErr := util.Do(request)
    if doErr != nil {
        glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, doErr, r.Method)
        writeJsonError(w, r, http.StatusInternalServerError, doErr)
        err = doErr
        return
    }
    defer func() {
        io.Copy(ioutil.Discard, resp.Body)
        resp.Body.Close()
    }()

    respBody, raErr := ioutil.ReadAll(resp.Body)
    if raErr != nil {
        glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error())
        writeJsonError(w, r, http.StatusInternalServerError, raErr)
        err = raErr
        return
    }

    glog.V(4).Infoln("post result", string(respBody))
    unmarshalErr := json.Unmarshal(respBody, &ret)
    if unmarshalErr != nil {
        glog.V(0).Infoln("failing to read upload response", r.RequestURI, string(respBody))
        writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr)
        err = unmarshalErr
        return
    }
    if ret.Error != "" {
        err = errors.New(ret.Error)
        glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error)
        writeJsonError(w, r, http.StatusInternalServerError, err)
        return
    }

    // find correct final path
    path := r.URL.Path
    if strings.HasSuffix(path, "/") {
        if ret.Name != "" {
            path += ret.Name
        } else {
            err = fmt.Errorf("can not write to folder %s without a file name", path)
            fs.filer.DeleteFileByFileId(fileId)
            glog.V(0).Infoln("Can not write to folder", path, "without a file name!")
            writeJsonError(w, r, http.StatusInternalServerError, err)
            return
        }
    }

    // use filer calculated md5 ETag, instead of the volume server crc ETag
    if r.Method == "PUT" {
        md5value = md5Hash.Sum(nil)
    }
    ret.ETag = getEtag(resp)
    return
}
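
// DeleteHandler deletes a file or directory entry; the "recursive", "ignoreRecursiveError",
// and "skipChunkDeletion" query parameters control how directories and chunk data are handled.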
// curl -X DELETE http://localhost:8888/path/to
// curl -X DELETE http://localhost:8888/path/to?recursive=true
// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true
// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true
func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {

    isRecursive := r.FormValue("recursive") == "true"
    if !isRecursive && fs.option.recursiveDelete {
        if r.FormValue("recursive") != "false" {
            isRecursive = true
        }
    }
    ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true"
    skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true"

    err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion)
    if err != nil {
        glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error())
        httpStatus := http.StatusInternalServerError
        if err == filer_pb.ErrNotFound {
            httpStatus = http.StatusNotFound
        }
        writeJsonError(w, r, httpStatus, err)
        return
    }

    w.WriteHeader(http.StatusNoContent)
}
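
// detectCollection decides which collection and replication to use for a request:
// query parameters override the filer defaults, and requests under the buckets
// folder take the bucket name as the collection with the bucket's replication setting.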
func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string) {
    // default
    collection = fs.option.Collection
    replication = fs.option.DefaultReplication

    // query parameters override the default collection and replication
    if qCollection != "" {
        collection = qCollection
    }
    if qReplication != "" {
        replication = qReplication
    }

    // required by buckets folder
    if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") {
        bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:]
        t := strings.Index(bucketAndObjectKey, "/")
        if t < 0 {
            collection = bucketAndObjectKey
        }
        if t > 0 {
            collection = bucketAndObjectKey[:t]
        }
        replication = fs.filer.ReadBucketOption(collection)
    }

    return
}