You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

364 lines
10 KiB

9 years ago
6 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
7 years ago
6 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
6 years ago
5 years ago
5 years ago
9 years ago
9 years ago
9 years ago
9 years ago
  1. package weed_server
  2. import (
  3. "context"
  4. "crypto/md5"
  5. "encoding/json"
  6. "errors"
  7. "fmt"
  8. "io"
  9. "io/ioutil"
  10. "mime"
  11. "net/http"
  12. "net/url"
  13. "os"
  14. filenamePath "path"
  15. "strconv"
  16. "strings"
  17. "time"
  18. "github.com/chrislusf/seaweedfs/weed/filer2"
  19. "github.com/chrislusf/seaweedfs/weed/glog"
  20. "github.com/chrislusf/seaweedfs/weed/operation"
  21. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  22. "github.com/chrislusf/seaweedfs/weed/security"
  23. "github.com/chrislusf/seaweedfs/weed/stats"
  24. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  25. "github.com/chrislusf/seaweedfs/weed/util"
  26. )
// Default ownership applied to entries created through the HTTP write path:
// the uid/gid of the process running this filer server.
var (
	OS_UID = uint32(os.Getuid())
	OS_GID = uint32(os.Getgid())
)
// FilerPostResult is the JSON body returned to the client after a POST upload.
type FilerPostResult struct {
	Name  string `json:"name,omitempty"`  // final file name recorded in the filer
	Size  int64  `json:"size,omitempty"`  // uploaded size in bytes, as reported by the volume server
	Error string `json:"error,omitempty"` // error string from the volume server upload result, if any
	Fid   string `json:"fid,omitempty"`   // file id assigned by the master
	Url   string `json:"url,omitempty"`   // volume server URL the content was written to
}
  38. func (fs *FilerServer) assignNewFileInfo(replication, collection, dataCenter, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
  39. stats.FilerRequestCounter.WithLabelValues("assign").Inc()
  40. start := time.Now()
  41. defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }()
  42. ar := &operation.VolumeAssignRequest{
  43. Count: 1,
  44. Replication: replication,
  45. Collection: collection,
  46. Ttl: ttlString,
  47. DataCenter: dataCenter,
  48. }
  49. var altRequest *operation.VolumeAssignRequest
  50. if dataCenter != "" {
  51. altRequest = &operation.VolumeAssignRequest{
  52. Count: 1,
  53. Replication: replication,
  54. Collection: collection,
  55. Ttl: ttlString,
  56. DataCenter: "",
  57. }
  58. }
  59. assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
  60. if ae != nil {
  61. glog.Errorf("failing to assign a file id: %v", ae)
  62. err = ae
  63. return
  64. }
  65. fileId = assignResult.Fid
  66. urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
  67. if fsync {
  68. urlLocation += "?fsync=true"
  69. }
  70. auth = assignResult.Auth
  71. return
  72. }
// PostHandler accepts a file upload: it picks collection/replication settings,
// assigns a file id from the master, proxies the content to a volume server,
// records the entry in the filer store, and replies with a FilerPostResult.
func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()

	query := r.URL.Query()
	collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
	dataCenter := query.Get("dataCenter")
	if dataCenter == "" {
		dataCenter = fs.option.DataCenter
	}
	ttlString := r.URL.Query().Get("ttl")

	// read ttl in seconds
	ttl, err := needle.ReadTTL(ttlString)
	ttlSeconds := int32(0)
	if err == nil {
		// NOTE(review): rounds down to whole minutes, so sub-minute TTL
		// precision is lost here — confirm this is intentional.
		ttlSeconds = int32(ttl.Minutes()) * 60
	}

	// Auto-chunking path replies on its own when it takes the request.
	if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync); autoChunked {
		return
	}

	// Encrypted storage uses a separate write path and replies here as well.
	if fs.option.Cipher {
		reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync)
		if err != nil {
			writeJsonError(w, r, http.StatusInternalServerError, err)
		} else if reply != nil {
			writeJsonQuiet(w, r, http.StatusCreated, reply)
		}
		return
	}

	fileId, urlLocation, auth, err := fs.assignNewFileInfo(replication, collection, dataCenter, ttlString, fsync)

	if err != nil || fileId == "" || urlLocation == "" {
		glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
		writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter))
		return
	}

	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)

	// Proxy the request body to the assigned volume server.
	u, _ := url.Parse(urlLocation)
	ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
	if err != nil {
		// uploadToVolumeServer has already written the error response
		return
	}

	if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil {
		// updateFilerStore has already written the error response
		return
	}

	// send back post result
	reply := FilerPostResult{
		Name:  ret.Name,
		Size:  int64(ret.Size),
		Error: ret.Error,
		Fid:   fileId,
		Url:   urlLocation,
	}
	setEtag(w, ret.ETag)
	writeJsonQuiet(w, r, http.StatusCreated, reply)
}
// update metadata in filer store
//
// updateFilerStore creates (or replaces) the filer entry for the uploaded
// content, pointing at the single chunk identified by fileId. On a store
// failure it deletes the just-uploaded chunk, writes the HTTP error response
// itself, and returns the error so the caller can stop.
func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string,
	collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) {

	stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
	start := time.Now()
	defer func() {
		stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds())
	}()

	// File mode comes from the ?mode= query parameter (octal), default 0660.
	modeStr := r.URL.Query().Get("mode")
	if modeStr == "" {
		modeStr = "0660"
	}
	mode, err := strconv.ParseUint(modeStr, 8, 32)
	if err != nil {
		glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr)
		mode = 0660
	}

	// A trailing-slash path means "store under this directory using the
	// name the volume server reported for the uploaded file".
	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		if ret.Name != "" {
			path += ret.Name
		}
	}

	// Preserve the original creation time when overwriting an existing entry.
	existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path))
	crTime := time.Now()
	if err == nil && existingEntry != nil {
		crTime = existingEntry.Crtime
	}
	entry := &filer2.Entry{
		FullPath: util.FullPath(path),
		Attr: filer2.Attr{
			Mtime:       time.Now(),
			Crtime:      crTime,
			Mode:        os.FileMode(mode),
			Uid:         OS_UID,
			Gid:         OS_GID,
			Replication: replication,
			Collection:  collection,
			TtlSec:      ttlSeconds,
			Mime:        ret.Mime,
			Md5:         md5value,
		},
		Chunks: []*filer_pb.FileChunk{{
			FileId: fileId,
			Size:   uint64(ret.Size),
			Mtime:  time.Now().UnixNano(),
			ETag:   ret.ETag,
		}},
	}

	// Fall back to guessing the MIME type from the file extension.
	if entry.Attr.Mime == "" {
		if ext := filenamePath.Ext(path); ext != "" {
			entry.Attr.Mime = mime.TypeByExtension(ext)
		}
	}
	// glog.V(4).Infof("saving %s => %+v", path, entry)
	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil {
		// the store write failed: remove the orphaned chunk from the volume server
		fs.filer.DeleteChunks(entry.Chunks)
		glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr)
		writeJsonError(w, r, http.StatusInternalServerError, dbErr)
		err = dbErr
		return
	}

	return nil
}
  190. // send request to volume server
  191. func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) {
  192. stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
  193. start := time.Now()
  194. defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()
  195. ret = &operation.UploadResult{}
  196. md5Hash := md5.New()
  197. body := r.Body
  198. if r.Method == "PUT" {
  199. // only PUT or large chunked files has Md5 in attributes
  200. body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash))
  201. }
  202. request := &http.Request{
  203. Method: r.Method,
  204. URL: u,
  205. Proto: r.Proto,
  206. ProtoMajor: r.ProtoMajor,
  207. ProtoMinor: r.ProtoMinor,
  208. Header: r.Header,
  209. Body: body,
  210. Host: r.Host,
  211. ContentLength: r.ContentLength,
  212. }
  213. if auth != "" {
  214. request.Header.Set("Authorization", "BEARER "+string(auth))
  215. }
  216. resp, doErr := util.Do(request)
  217. if doErr != nil {
  218. glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, doErr, r.Method)
  219. writeJsonError(w, r, http.StatusInternalServerError, doErr)
  220. err = doErr
  221. return
  222. }
  223. defer func() {
  224. io.Copy(ioutil.Discard, resp.Body)
  225. resp.Body.Close()
  226. }()
  227. respBody, raErr := ioutil.ReadAll(resp.Body)
  228. if raErr != nil {
  229. glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error())
  230. writeJsonError(w, r, http.StatusInternalServerError, raErr)
  231. err = raErr
  232. return
  233. }
  234. glog.V(4).Infoln("post result", string(respBody))
  235. unmarshalErr := json.Unmarshal(respBody, &ret)
  236. if unmarshalErr != nil {
  237. glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(respBody))
  238. writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr)
  239. err = unmarshalErr
  240. return
  241. }
  242. if ret.Error != "" {
  243. err = errors.New(ret.Error)
  244. glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error)
  245. writeJsonError(w, r, http.StatusInternalServerError, err)
  246. return
  247. }
  248. // find correct final path
  249. path := r.URL.Path
  250. if strings.HasSuffix(path, "/") {
  251. if ret.Name != "" {
  252. path += ret.Name
  253. } else {
  254. err = fmt.Errorf("can not to write to folder %s without a file name", path)
  255. fs.filer.DeleteFileByFileId(fileId)
  256. glog.V(0).Infoln("Can not to write to folder", path, "without a file name!")
  257. writeJsonError(w, r, http.StatusInternalServerError, err)
  258. return
  259. }
  260. }
  261. // use filer calculated md5 ETag, instead of the volume server crc ETag
  262. if r.Method == "PUT" {
  263. md5value = md5Hash.Sum(nil)
  264. }
  265. ret.ETag = getEtag(resp)
  266. return
  267. }
  268. // curl -X DELETE http://localhost:8888/path/to
  269. // curl -X DELETE http://localhost:8888/path/to?recursive=true
  270. // curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true
  271. // curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true
  272. func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
  273. isRecursive := r.FormValue("recursive") == "true"
  274. if !isRecursive && fs.option.recursiveDelete {
  275. if r.FormValue("recursive") != "false" {
  276. isRecursive = true
  277. }
  278. }
  279. ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true"
  280. skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true"
  281. objectPath := r.URL.Path
  282. if len(r.URL.Path) > 1 && strings.HasSuffix(objectPath, "/") {
  283. objectPath = objectPath[0 : len(objectPath)-1]
  284. }
  285. err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false)
  286. if err != nil {
  287. glog.V(1).Infoln("deleting", objectPath, ":", err.Error())
  288. httpStatus := http.StatusInternalServerError
  289. if err == filer_pb.ErrNotFound {
  290. httpStatus = http.StatusNotFound
  291. }
  292. writeJsonError(w, r, httpStatus, err)
  293. return
  294. }
  295. w.WriteHeader(http.StatusNoContent)
  296. }
  297. func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) {
  298. // default
  299. collection = fs.option.Collection
  300. replication = fs.option.DefaultReplication
  301. // get default collection settings
  302. if qCollection != "" {
  303. collection = qCollection
  304. }
  305. if qReplication != "" {
  306. replication = qReplication
  307. }
  308. // required by buckets folder
  309. if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") {
  310. bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:]
  311. t := strings.Index(bucketAndObjectKey, "/")
  312. if t < 0 {
  313. collection = bucketAndObjectKey
  314. }
  315. if t > 0 {
  316. collection = bucketAndObjectKey[:t]
  317. }
  318. replication, fsync = fs.filer.ReadBucketOption(collection)
  319. }
  320. return
  321. }