package weed_server

import (
	"bytes"
	"encoding/json"
	"io"
	"mime"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/images"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
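// GetOrHeadHandler serves GET and HEAD requests for a single needle,
// redirecting the client to another volume server when the volume is
// not stored locally.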
func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {
	n := new(needle.Needle)
	vid, fid, filename, ext, _ := parseURLPath(r.URL.Path)
	volumeId, err := needle.NewVolumeId(vid)
	if err != nil {
		glog.V(2).Infoln("parsing error:", err, r.URL.Path)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	err = n.ParsePath(fid)
	if err != nil {
		glog.V(2).Infoln("parsing fid error:", err, r.URL.Path)
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	glog.V(4).Infoln("volume", volumeId, "reading", n)
	hasVolume := vs.store.HasVolume(volumeId)
	_, hasEcShard := vs.store.HasEcShard(volumeId)
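	// The volume is not stored locally: either redirect the client to a
	// server that has it (when ReadRedirect is enabled) or return 404.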
	if !hasVolume && !hasEcShard {
		if !vs.ReadRedirect {
			glog.V(2).Infoln("volume is not local:", err, r.URL.Path)
			w.WriteHeader(http.StatusNotFound)
			return
		}
		lookupResult, err := operation.Lookup(vs.GetMaster(), volumeId.String())
		glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
		if err == nil && len(lookupResult.Locations) > 0 {
			u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl))
			u.Path = r.URL.Path
			arg := url.Values{}
			if c := r.FormValue("collection"); c != "" {
				arg.Set("collection", c)
			}
			u.RawQuery = arg.Encode()
			http.Redirect(w, r, u.String(), http.StatusMovedPermanently)
		} else {
			glog.V(2).Infoln("lookup error:", err, r.URL.Path)
			w.WriteHeader(http.StatusNotFound)
		}
		return
	}

	cookie := n.Cookie

	var count int
	if hasVolume {
		count, err = vs.store.ReadVolumeNeedle(volumeId, n)
	} else if hasEcShard {
		count, err = vs.store.ReadEcShardNeedle(volumeId, n)
	}
	glog.V(4).Infoln("read bytes", count, "error", err)
	if err != nil || count < 0 {
		glog.V(0).Infof("read %s error: %v", r.URL.Path, err)
		w.WriteHeader(http.StatusNotFound)
		return
	}
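	// Reject requests whose cookie does not match the stored needle,
	// so that file ids cannot simply be guessed.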
	if n.Cookie != cookie {
		glog.V(0).Infof("request %s with cookie:%x expected:%x from %s agent %s", r.URL.Path, cookie, n.Cookie, r.RemoteAddr, r.UserAgent())
		w.WriteHeader(http.StatusNotFound)
		return
	}
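	// Handle conditional requests: If-Modified-Since and If-None-Match
	// are answered with 304 Not Modified when the client copy is current.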
	if n.LastModified != 0 {
		w.Header().Set("Last-Modified", time.Unix(int64(n.LastModified), 0).UTC().Format(http.TimeFormat))
		if r.Header.Get("If-Modified-Since") != "" {
			if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
				if t.Unix() >= int64(n.LastModified) {
					w.WriteHeader(http.StatusNotModified)
					return
				}
			}
		}
	}
	if inm := r.Header.Get("If-None-Match"); inm == "\""+n.Etag()+"\"" {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	if r.Header.Get("ETag-MD5") == "True" {
		setEtag(w, n.MD5())
	} else {
		setEtag(w, n.Etag())
	}
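	// Key/value pairs stored with the needle are returned as response headers.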
	if n.HasPairs() {
		pairMap := make(map[string]string)
		err = json.Unmarshal(n.Pairs, &pairMap)
		if err != nil {
			glog.V(0).Infoln("Unmarshal pairs error:", err)
		}
		for k, v := range pairMap {
			w.Header().Set(k, v)
		}
	}

	if vs.tryHandleChunkedFile(n, filename, w, r) {
		return
	}

	if n.NameSize > 0 && filename == "" {
		filename = string(n.Name)
		if ext == "" {
			ext = path.Ext(filename)
		}
	}
	mtype := ""
	if n.MimeSize > 0 {
		mt := string(n.Mime)
		if !strings.HasPrefix(mt, "application/octet-stream") {
			mtype = mt
		}
	}
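	// For gzipped needles, serve the compressed bytes when the client
	// accepts gzip; otherwise decompress before writing the response.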
	if ext != ".gz" {
		if n.IsGzipped() {
			if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
				w.Header().Set("Content-Encoding", "gzip")
			} else {
				if n.Data, err = util.UnGzipData(n.Data); err != nil {
					glog.V(0).Infoln("ungzip error:", err, r.URL.Path)
				}
			}
		}
	}

	rs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)
	if e := writeResponseContent(filename, mtype, rs, w, r); e != nil {
		glog.V(2).Infoln("response write error:", e)
	}
}
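// tryHandleChunkedFile serves a chunked file by loading its chunk manifest
// and streaming the chunks from the cluster. It returns false when the
// needle is not a chunk manifest (or ?cm=false is requested), so the caller
// falls back to serving the needle data directly.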
func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
	if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" {
		return false
	}

	chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
	if e != nil {
		glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e)
		return false
	}
	if fileName == "" && chunkManifest.Name != "" {
		fileName = chunkManifest.Name
	}
	ext := path.Ext(fileName)

	mType := ""
	if chunkManifest.Mime != "" {
		mt := chunkManifest.Mime
		if !strings.HasPrefix(mt, "application/octet-stream") {
			mType = mt
		}
	}

	w.Header().Set("X-File-Store", "chunked")

	chunkedFileReader := &operation.ChunkedFileReader{
		Manifest: chunkManifest,
		Master:   vs.GetMaster(),
	}
	defer chunkedFileReader.Close()

	rs := conditionallyResizeImages(chunkedFileReader, ext, r)
	if e := writeResponseContent(fileName, mType, rs, w, r); e != nil {
		glog.V(2).Infoln("response write error:", e)
	}
	return true
}
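// conditionallyResizeImages applies the width, height and mode request
// parameters to PNG, JPEG and GIF content via images.Resized; content with
// any other extension is returned unchanged.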
func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker {
	rs := originalDataReaderSeeker
	if len(ext) > 0 {
		ext = strings.ToLower(ext)
	}
	if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" {
		width, height := 0, 0
		if r.FormValue("width") != "" {
			width, _ = strconv.Atoi(r.FormValue("width"))
		}
		if r.FormValue("height") != "" {
			height, _ = strconv.Atoi(r.FormValue("height"))
		}
		rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode"))
	}
	return rs
}
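// writeResponseContent writes the file content to the client, setting
// Content-Type and Content-Disposition, and honoring HEAD requests as well
// as single-range and multipart/byteranges Range requests.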
func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error {
	totalSize, e := rs.Seek(0, 2)
	if mimeType == "" {
		if ext := path.Ext(filename); ext != "" {
			mimeType = mime.TypeByExtension(ext)
		}
	}
	if mimeType != "" {
		w.Header().Set("Content-Type", mimeType)
	}
	if filename != "" {
		contentDisposition := "inline"
		if r.FormValue("dl") != "" {
			if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl {
				contentDisposition = "attachment"
			}
		}
		w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`)
	}
	w.Header().Set("Accept-Ranges", "bytes")
	if r.Method == "HEAD" {
		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
		return nil
	}
	rangeReq := r.Header.Get("Range")
	if rangeReq == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
		if _, e = rs.Seek(0, 0); e != nil {
			return e
		}
		_, e = io.Copy(w, rs)
		return e
	}

	//the rest is dealing with partial content request
	//mostly copy from src/pkg/net/http/fs.go
	ranges, err := parseRange(rangeReq, totalSize)
	if err != nil {
		http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
		return nil
	}
	if sumRangesSize(ranges) > totalSize {
		// The total number of bytes in all the ranges
		// is larger than the size of the file by
		// itself, so this is probably an attack, or a
		// dumb client. Ignore the range request.
		return nil
	}
	if len(ranges) == 0 {
		return nil
	}
	if len(ranges) == 1 {
		// RFC 2616, Section 14.16:
		// "When an HTTP message includes the content of a single
		// range (for example, a response to a request for a
		// single range, or to a request for a set of ranges
		// that overlap without any holes), this content is
		// transmitted with a Content-Range header, and a
		// Content-Length header showing the number of bytes
		// actually transferred.
		// ...
		// A response to a request for a single range MUST NOT
		// be sent using the multipart/byteranges media type."
		ra := ranges[0]
		w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
		w.Header().Set("Content-Range", ra.contentRange(totalSize))
		w.WriteHeader(http.StatusPartialContent)
		if _, e = rs.Seek(ra.start, 0); e != nil {
			return e
		}
		_, e = io.CopyN(w, rs, ra.length)
		return e
	}
	// process multiple ranges
	for _, ra := range ranges {
		if ra.start > totalSize {
			http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
			return nil
		}
	}
	sendSize := rangesMIMESize(ranges, mimeType, totalSize)
	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)
	w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
	sendContent := pr
	defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
	go func() {
		for _, ra := range ranges {
			part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize))
			if e != nil {
				pw.CloseWithError(e)
				return
			}
			if _, e = rs.Seek(ra.start, 0); e != nil {
				pw.CloseWithError(e)
				return
			}
			if _, e = io.CopyN(part, rs, ra.length); e != nil {
				pw.CloseWithError(e)
				return
			}
		}
		mw.Close()
		pw.Close()
	}()
	if w.Header().Get("Content-Encoding") == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
	}
	w.WriteHeader(http.StatusPartialContent)
	_, e = io.CopyN(w, sendContent, sendSize)
	return e
}