package weed_server

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"mime"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/images"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)
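// fileNameEscaper escapes backslashes and double quotes so the file name can be
// embedded safely inside the quoted Content-Disposition header value.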
var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
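// GetOrHeadHandler serves GET and HEAD requests for a single needle. It parses the
// volume id and file id from the URL, redirects to another volume server when the
// volume is not stored locally (and read redirects are enabled), validates the
// cookie, honors conditional request headers, and finally streams the (possibly
// resized or un-gzipped) content to the client.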
func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	n := new(needle.Needle)
	vid, fid, filename, ext, _ := parseURLPath(r.URL.Path)
	volumeId, err := needle.NewVolumeId(vid)
	if err != nil {
		glog.V(2).Infoln("parsing error:", err, r.URL.Path)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	err = n.ParsePath(fid)
	if err != nil {
		glog.V(2).Infoln("parsing fid error:", err, r.URL.Path)
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	glog.V(4).Infoln("volume", volumeId, "reading", n)
	hasVolume := vs.store.HasVolume(volumeId)
	_, hasEcVolume := vs.store.FindEcVolume(volumeId)
	if !hasVolume && !hasEcVolume {
		if !vs.ReadRedirect {
			glog.V(2).Infoln("volume is not local:", r.URL.Path)
			w.WriteHeader(http.StatusNotFound)
			return
		}
		// the volume is not local: ask the master where it lives and redirect the client
		lookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())
		glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
		if err == nil && len(lookupResult.Locations) > 0 {
			u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))
			u.Path = r.URL.Path
			arg := url.Values{}
			if c := r.FormValue("collection"); c != "" {
				arg.Set("collection", c)
			}
			u.RawQuery = arg.Encode()
			http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
		} else {
			glog.V(2).Infoln("lookup error:", err, r.URL.Path)
			w.WriteHeader(http.StatusNotFound)
		}
		return
	}
	cookie := n.Cookie

	var count int
	if hasVolume {
		count, err = vs.store.ReadVolumeNeedle(volumeId, n)
	} else if hasEcVolume {
		count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n)
	}
	glog.V(4).Infoln("read bytes", count, "error", err)
	if err != nil || count < 0 {
		glog.V(0).Infof("read %s error: %v", r.URL.Path, err)
		w.WriteHeader(http.StatusNotFound)
		return
	}
	// the cookie guards against guessed file ids
	if n.Cookie != cookie {
		glog.V(0).Infof("request %s with cookie:%x expected:%x from %s agent %s", r.URL.Path, cookie, n.Cookie, r.RemoteAddr, r.UserAgent())
		w.WriteHeader(http.StatusNotFound)
		return
	}

	// handle conditional requests: If-Modified-Since and If-None-Match
	if n.LastModified != 0 {
		w.Header().Set("Last-Modified", time.Unix(int64(n.LastModified), 0).UTC().Format(http.TimeFormat))
		if r.Header.Get("If-Modified-Since") != "" {
			if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
				if t.Unix() >= int64(n.LastModified) {
					w.WriteHeader(http.StatusNotModified)
					return
				}
			}
		}
	}
	if inm := r.Header.Get("If-None-Match"); inm == "\""+n.Etag()+"\"" {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	if r.Header.Get("ETag-MD5") == "True" {
		setEtag(w, n.MD5())
	} else {
		setEtag(w, n.Etag())
	}

	// pass any stored key/value pairs through as response headers
	if n.HasPairs() {
		pairMap := make(map[string]string)
		err = json.Unmarshal(n.Pairs, &pairMap)
		if err != nil {
			glog.V(0).Infoln("Unmarshal pairs error:", err)
		}
		for k, v := range pairMap {
			w.Header().Set(k, v)
		}
	}

	if vs.tryHandleChunkedFile(n, filename, w, r) {
		return
	}

	if n.NameSize > 0 && filename == "" {
		filename = string(n.Name)
		if ext == "" {
			ext = path.Ext(filename)
		}
	}
	mtype := ""
	if n.MimeSize > 0 {
		mt := string(n.Mime)
		if !strings.HasPrefix(mt, "application/octet-stream") {
			mtype = mt
		}
	}

	// serve gzipped data as-is when the client accepts gzip, otherwise decompress first
	if ext != ".gz" {
		if n.IsGzipped() {
			if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
				w.Header().Set("Content-Encoding", "gzip")
			} else {
				if n.Data, err = util.UnGzipData(n.Data); err != nil {
					glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
				}
			}
		}
	}

	rs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)
	if e := writeResponseContent(filename, mtype, rs, w, r); e != nil {
		glog.V(2).Infoln("response write error:", e)
	}
}
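// tryHandleChunkedFile checks whether the needle holds a chunk manifest rather than
// the file data itself. If so, it streams the chunks referenced by the manifest and
// reports true; otherwise it reports false so the caller serves the needle directly.
// Clients can skip manifest handling by passing ?cm=false.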
func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
	if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" {
		return false
	}

	chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
	if e != nil {
		glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e)
		return false
	}
	if fileName == "" && chunkManifest.Name != "" {
		fileName = chunkManifest.Name
	}

	ext := path.Ext(fileName)

	mType := ""
	if chunkManifest.Mime != "" {
		mt := chunkManifest.Mime
		if !strings.HasPrefix(mt, "application/octet-stream") {
			mType = mt
		}
	}

	w.Header().Set("X-File-Store", "chunked")

	chunkedFileReader := &operation.ChunkedFileReader{
		Manifest: chunkManifest,
		Master:   vs.GetMaster(),
	}
	defer chunkedFileReader.Close()

	rs := conditionallyResizeImages(chunkedFileReader, ext, r)
	if e := writeResponseContent(fileName, mType, rs, w, r); e != nil {
		glog.V(2).Infoln("response write error:", e)
	}
	return true
}
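// conditionallyResizeImages resizes PNG, JPEG, and GIF content when the request
// carries width, height, or mode parameters; all other content is returned unchanged.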
func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {
	rs := originalDataReaderSeeker
	if len(ext) > 0 {
		ext = strings.ToLower(ext)
	}
	if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" {
		width, height := 0, 0
		if r.FormValue("width") != "" {
			width, _ = strconv.Atoi(r.FormValue("width"))
		}
		if r.FormValue("height") != "" {
			height, _ = strconv.Atoi(r.FormValue("height"))
		}
		rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode"))
	}
	return rs
}
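// writeResponseContent streams the content to the client. It sets Content-Type and
// Content-Disposition, answers HEAD requests with headers only, and supports both
// single and multipart byte-range requests, following the approach of net/http/fs.go.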
func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
	totalSize, e := rs.Seek(0, io.SeekEnd)
	if mimeType == "" {
		if ext := path.Ext(filename); ext != "" {
			mimeType = mime.TypeByExtension(ext)
		}
	}
	if mimeType != "" {
		w.Header().Set("Content-Type", mimeType)
	}
	if filename != "" {
		contentDisposition := "inline"
		if r.FormValue("dl") != "" {
			if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
				contentDisposition = "attachment"
			}
		}
		w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
	}
	w.Header().Set("Accept-Ranges", "bytes")
	if r.Method == "HEAD" {
		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
		return nil
	}
	rangeReq := r.Header.Get("Range")
	if rangeReq == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
		if _, e = rs.Seek(0, io.SeekStart); e != nil {
			return e
		}
		_, e = io.Copy(w, rs)
		return e
	}

	// the rest deals with partial content requests,
	// mostly copied from src/pkg/net/http/fs.go
	ranges, err := parseRange(rangeReq, totalSize)
	if err != nil {
		http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
		return nil
	}
	if sumRangesSize(ranges) > totalSize {
		// The total number of bytes in all the ranges
		// is larger than the size of the file by
		// itself, so this is probably an attack, or a
		// dumb client. Ignore the range request.
		return nil
	}
	if len(ranges) == 0 {
		return nil
	}
	if len(ranges) == 1 {
		// RFC 2616, Section 14.16:
		// "When an HTTP message includes the content of a single
		// range (for example, a response to a request for a
		// single range, or to a request for a set of ranges
		// that overlap without any holes), this content is
		// transmitted with a Content-Range header, and a
		// Content-Length header showing the number of bytes
		// actually transferred.
		// ...
		// A response to a request for a single range MUST NOT
		// be sent using the multipart/byteranges media type."
		ra := ranges[0]
		w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
		w.Header().Set("Content-Range", ra.contentRange(totalSize))
		w.WriteHeader(http.StatusPartialContent)
		if _, e = rs.Seek(ra.start, io.SeekStart); e != nil {
			return e
		}
		_, e = io.CopyN(w, rs, ra.length)
		return e
	}

	// process multiple ranges
	for _, ra := range ranges {
		if ra.start > totalSize {
			http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
			return nil
		}
	}
	sendSize := rangesMIMESize(ranges, mimeType, totalSize)
	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)
	w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
	sendContent := pr
	defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
	go func() {
		for _, ra := range ranges {
			part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
			if e != nil {
				pw.CloseWithError(e)
				return
			}
			if _, e = rs.Seek(ra.start, io.SeekStart); e != nil {
				pw.CloseWithError(e)
				return
			}
			if _, e = io.CopyN(part, rs, ra.length); e != nil {
				pw.CloseWithError(e)
				return
			}
		}
		mw.Close()
		pw.Close()
	}()
	if w.Header().Get("Content-Encoding") == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
	}
	w.WriteHeader(http.StatusPartialContent)
	_, e = io.CopyN(w, sendContent, sendSize)
	return e
}