You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

440 lines
11 KiB

6 years ago
6 years ago
6 years ago
4 years ago
6 years ago
4 years ago
6 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
adding locking to avoid nil VolumeLocationList fix panic: runtime error: invalid memory address or nil pointer dereference Oct 22 00:53:44 bedb-master1 weed[8055]: [signal SIGSEGV: segmentation violation code=0x1 addr=0x8 pc=0x17658da] Oct 22 00:53:44 bedb-master1 weed[8055]: goroutine 310 [running]: Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLocationList).Length(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_location_list.go:35 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).enoughCopies(...) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:376 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*VolumeLayout).ensureCorrectWritables(0xc000111d50, 0xc000b55438) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/volume_layout.go:202 +0x5a Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/topology.(*Topology).SyncDataNodeRegistration(0xc00042ac60, 0xc001454d30, 0x1, 0x1, 0xc0005fc000, 0xc00135de40, 0x4, 0xc00135de50, 0x10, 0x10d, ...) 
Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/topology/topology.go:224 +0x616 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/server.(*MasterServer).SendHeartbeat(0xc000162700, 0x23b97c0, 0xc000ae2c90, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/server/master_grpc_server.go:106 +0x325 Oct 22 00:53:44 bedb-master1 weed[8055]: github.com/chrislusf/seaweedfs/weed/pb/master_pb._Seaweed_SendHeartbeat_Handler(0x1f8e7c0, 0xc000162700, 0x23b0a60, 0xc00024b440, 0x3172c38, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/seaweedfs/weed/pb/master_pb/master.pb.go:4250 +0xad Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).processStreamingRPC(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0xc0001fea80, 0x311fec0, 0x0, 0x0, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1329 +0xcd8 Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).handleStream(0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100, 0x0) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:1409 +0xc5c Oct 22 00:53:44 bedb-master1 weed[8055]: google.golang.org/grpc.(*Server).serveStreams.func1.1(0xc0001ce8b0, 0xc0001f31e0, 0x23bb800, 0xc000ac5500, 0xc000ab7100) Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:746 +0xa5 Oct 22 00:53:44 bedb-master1 weed[8055]: created by google.golang.org/grpc.(*Server).serveStreams.func1 Oct 22 00:53:44 bedb-master1 weed[8055]: #011/root/go/pkg/mod/google.golang.org/grpc@v1.29.1/server.go:744 +0xa5 Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Main process exited, code=exited, status=2/INVALIDARGUMENT Oct 22 00:53:44 bedb-master1 systemd[1]: weedmaster.service: Failed with result 'exit-code'.
4 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
  1. package topology
  2. import (
  3. "errors"
  4. "fmt"
  5. "github.com/chrislusf/seaweedfs/weed/storage/types"
  6. "math/rand"
  7. "sync"
  8. "time"
  9. "github.com/chrislusf/seaweedfs/weed/glog"
  10. "github.com/chrislusf/seaweedfs/weed/storage"
  11. "github.com/chrislusf/seaweedfs/weed/storage/needle"
  12. "github.com/chrislusf/seaweedfs/weed/storage/super_block"
  13. )
// copyState describes how many replica locations of a volume are known,
// relative to the desired replica placement.
type copyState int

const (
	noCopies           copyState = 0 + iota // no replica locations known
	insufficientCopies                      // fewer replicas than the placement requires
	enoughCopies                            // replica count satisfies the placement
)
  20. type volumeState string
  21. const (
  22. readOnlyState volumeState = "ReadOnly"
  23. oversizedState = "Oversized"
  24. )
// stateIndicator reports whether a given copyState should mark a volume
// with the owning volumesBinaryState's name.
type stateIndicator func(copyState) bool
  26. func ExistCopies() stateIndicator {
  27. return func(state copyState) bool { return state != noCopies }
  28. }
  29. func NoCopies() stateIndicator {
  30. return func(state copyState) bool { return state == noCopies }
  31. }
// volumesBinaryState tracks, per volume, whether a named binary condition
// (readonly, oversized, ...) currently applies, judged by `indicator`
// against the volume's copy state.
type volumesBinaryState struct {
	rp        *super_block.ReplicaPlacement // desired replication, used to classify copy counts
	name      volumeState                   // the name for volume state (eg. "Readonly", "Oversized")
	indicator stateIndicator                // indicate whether the volumes should be marked as `name`
	copyMap   map[needle.VolumeId]*VolumeLocationList // known locations per volume id
}
  38. func NewVolumesBinaryState(name volumeState, rp *super_block.ReplicaPlacement, indicator stateIndicator) *volumesBinaryState {
  39. return &volumesBinaryState{
  40. rp: rp,
  41. name: name,
  42. indicator: indicator,
  43. copyMap: make(map[needle.VolumeId]*VolumeLocationList),
  44. }
  45. }
  46. func (v *volumesBinaryState) Dump() (res []uint32) {
  47. for vid, list := range v.copyMap {
  48. if v.indicator(v.copyState(list)) {
  49. res = append(res, uint32(vid))
  50. }
  51. }
  52. return
  53. }
  54. func (v *volumesBinaryState) IsTrue(vid needle.VolumeId) bool {
  55. list, _ := v.copyMap[vid]
  56. return v.indicator(v.copyState(list))
  57. }
  58. func (v *volumesBinaryState) Add(vid needle.VolumeId, dn *DataNode) {
  59. list, _ := v.copyMap[vid]
  60. if list != nil {
  61. list.Set(dn)
  62. return
  63. }
  64. list = NewVolumeLocationList()
  65. list.Set(dn)
  66. v.copyMap[vid] = list
  67. }
  68. func (v *volumesBinaryState) Remove(vid needle.VolumeId, dn *DataNode) {
  69. list, _ := v.copyMap[vid]
  70. if list != nil {
  71. list.Remove(dn)
  72. if list.Length() == 0 {
  73. delete(v.copyMap, vid)
  74. }
  75. }
  76. }
  77. func (v *volumesBinaryState) copyState(list *VolumeLocationList) copyState {
  78. if list == nil {
  79. return noCopies
  80. }
  81. if list.Length() < v.rp.GetCopyCount() {
  82. return insufficientCopies
  83. }
  84. return enoughCopies
  85. }
// mapping from volume to its locations, inverted from server to volume
type VolumeLayout struct {
	rp               *super_block.ReplicaPlacement // desired replication for every volume in this layout
	ttl              *needle.TTL
	diskType         types.DiskType
	vid2location     map[needle.VolumeId]*VolumeLocationList // all known locations per volume id
	writables        []needle.VolumeId // transient array of writable volume id
	readonlyVolumes  *volumesBinaryState // readonly volumes
	oversizedVolumes *volumesBinaryState // oversized volumes
	volumeSizeLimit  uint64 // size at/above which a volume is considered oversized
	replicationAsMin bool   // if true, more copies than rp requires still count as "enough" (see enoughCopies)
	accessLock       sync.RWMutex // guards vid2location, writables and the binary states
}
// VolumeLayoutStats aggregates size and file counts over all volumes in a
// layout (see VolumeLayout.Stats).
type VolumeLayoutStats struct {
	TotalSize uint64 // capacity: actual size for readonly volumes, size limit for writable ones
	UsedSize  uint64 // bytes currently used across volumes
	FileCount uint64 // total file count across volumes
}
  104. func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType types.DiskType, volumeSizeLimit uint64, replicationAsMin bool) *VolumeLayout {
  105. return &VolumeLayout{
  106. rp: rp,
  107. ttl: ttl,
  108. diskType: diskType,
  109. vid2location: make(map[needle.VolumeId]*VolumeLocationList),
  110. writables: *new([]needle.VolumeId),
  111. readonlyVolumes: NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
  112. oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
  113. volumeSizeLimit: volumeSizeLimit,
  114. replicationAsMin: replicationAsMin,
  115. }
  116. }
  117. func (vl *VolumeLayout) String() string {
  118. vl.accessLock.RLock()
  119. defer vl.accessLock.RUnlock()
  120. return fmt.Sprintf("rp:%v, ttl:%v, vid2location:%v, writables:%v, volumeSizeLimit:%v", vl.rp, vl.ttl, vl.vid2location, vl.writables, vl.volumeSizeLimit)
  121. }
// RegisterVolume records that data node dn reports volume v, updating the
// location list and the readonly bookkeeping, and (via defer) the oversized
// bookkeeping.
func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()
	// Deferred after Unlock, so (LIFO order) it still runs under the lock.
	defer vl.rememberOversizedVolume(v, dn)

	if _, ok := vl.vid2location[v.Id]; !ok {
		vl.vid2location[v.Id] = NewVolumeLocationList()
	}
	vl.vid2location[v.Id].Set(dn)
	// glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount())
	// Re-check every known replica: a single read-only or unqueryable copy
	// makes the whole volume unwritable. NOTE: `dn` below shadows the parameter.
	for _, dn := range vl.vid2location[v.Id].list {
		if vInfo, err := dn.GetVolumesById(v.Id); err == nil {
			if vInfo.ReadOnly {
				glog.V(1).Infof("vid %d removed from writable", v.Id)
				vl.removeFromWritable(v.Id)
				vl.readonlyVolumes.Add(v.Id, dn)
				return
			} else {
				vl.readonlyVolumes.Remove(v.Id, dn)
			}
		} else {
			// The node no longer reports this volume; treat it as unwritable.
			glog.V(1).Infof("vid %d removed from writable", v.Id)
			vl.removeFromWritable(v.Id)
			vl.readonlyVolumes.Remove(v.Id, dn)
			return
		}
	}
}
  149. func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo, dn *DataNode) {
  150. if vl.isOversized(v) {
  151. vl.oversizedVolumes.Add(v.Id, dn)
  152. } else {
  153. vl.oversizedVolumes.Remove(v.Id, dn)
  154. }
  155. }
// UnRegisterVolume removes dn as a location of volume v and refreshes the
// derived state (readonly/oversized trackers, writable list). No-op when the
// volume or the node is not currently tracked.
func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	// remove from vid2location map
	location, ok := vl.vid2location[v.Id]
	if !ok {
		return
	}

	if location.Remove(dn) {
		vl.readonlyVolumes.Remove(v.Id, dn)
		vl.oversizedVolumes.Remove(v.Id, dn)
		// Re-evaluate writability before the entry may be dropped below.
		vl.ensureCorrectWritables(v.Id)

		if location.Length() == 0 {
			delete(vl.vid2location, v.Id)
		}
	}
}
  173. func (vl *VolumeLayout) EnsureCorrectWritables(v *storage.VolumeInfo) {
  174. vl.accessLock.Lock()
  175. defer vl.accessLock.Unlock()
  176. vl.ensureCorrectWritables(v.Id)
  177. }
  178. func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) {
  179. if vl.enoughCopies(vid) && vl.isAllWritable(vid) {
  180. if !vl.oversizedVolumes.IsTrue(vid) {
  181. vl.setVolumeWritable(vid)
  182. }
  183. } else {
  184. vl.removeFromWritable(vid)
  185. }
  186. }
  187. func (vl *VolumeLayout) isAllWritable(vid needle.VolumeId) bool {
  188. for _, dn := range vl.vid2location[vid].list {
  189. if v, getError := dn.GetVolumesById(vid); getError == nil {
  190. if v.ReadOnly {
  191. return false
  192. }
  193. }
  194. }
  195. return true
  196. }
  197. func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool {
  198. return uint64(v.Size) >= vl.volumeSizeLimit
  199. }
  200. func (vl *VolumeLayout) isWritable(v *storage.VolumeInfo) bool {
  201. return !vl.isOversized(v) &&
  202. v.Version == needle.CurrentVersion &&
  203. !v.ReadOnly
  204. }
  205. func (vl *VolumeLayout) isEmpty() bool {
  206. vl.accessLock.RLock()
  207. defer vl.accessLock.RUnlock()
  208. return len(vl.vid2location) == 0
  209. }
  210. func (vl *VolumeLayout) Lookup(vid needle.VolumeId) []*DataNode {
  211. vl.accessLock.RLock()
  212. defer vl.accessLock.RUnlock()
  213. if location := vl.vid2location[vid]; location != nil {
  214. return location.list
  215. }
  216. return nil
  217. }
  218. func (vl *VolumeLayout) ListVolumeServers() (nodes []*DataNode) {
  219. vl.accessLock.RLock()
  220. defer vl.accessLock.RUnlock()
  221. for _, location := range vl.vid2location {
  222. nodes = append(nodes, location.list...)
  223. }
  224. return
  225. }
  226. func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*needle.VolumeId, uint64, *VolumeLocationList, error) {
  227. vl.accessLock.RLock()
  228. defer vl.accessLock.RUnlock()
  229. lenWriters := len(vl.writables)
  230. if lenWriters <= 0 {
  231. glog.V(0).Infoln("No more writable volumes!")
  232. return nil, 0, nil, errors.New("No more writable volumes!")
  233. }
  234. if option.DataCenter == "" {
  235. vid := vl.writables[rand.Intn(lenWriters)]
  236. locationList := vl.vid2location[vid]
  237. if locationList != nil {
  238. return &vid, count, locationList, nil
  239. }
  240. return nil, 0, nil, errors.New("Strangely vid " + vid.String() + " is on no machine!")
  241. }
  242. var vid needle.VolumeId
  243. var locationList *VolumeLocationList
  244. counter := 0
  245. for _, v := range vl.writables {
  246. volumeLocationList := vl.vid2location[v]
  247. for _, dn := range volumeLocationList.list {
  248. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  249. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  250. continue
  251. }
  252. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  253. continue
  254. }
  255. counter++
  256. if rand.Intn(counter) < 1 {
  257. vid, locationList = v, volumeLocationList
  258. }
  259. }
  260. }
  261. }
  262. return &vid, count, locationList, nil
  263. }
  264. func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
  265. vl.accessLock.RLock()
  266. defer vl.accessLock.RUnlock()
  267. if option.DataCenter == "" {
  268. return len(vl.writables)
  269. }
  270. counter := 0
  271. for _, v := range vl.writables {
  272. for _, dn := range vl.vid2location[v].list {
  273. if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
  274. if option.Rack != "" && dn.GetRack().Id() != NodeId(option.Rack) {
  275. continue
  276. }
  277. if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
  278. continue
  279. }
  280. counter++
  281. }
  282. }
  283. }
  284. return counter
  285. }
  286. func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
  287. toDeleteIndex := -1
  288. for k, id := range vl.writables {
  289. if id == vid {
  290. toDeleteIndex = k
  291. break
  292. }
  293. }
  294. if toDeleteIndex >= 0 {
  295. glog.V(0).Infoln("Volume", vid, "becomes unwritable")
  296. vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
  297. return true
  298. }
  299. return false
  300. }
  301. func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool {
  302. for _, v := range vl.writables {
  303. if v == vid {
  304. return false
  305. }
  306. }
  307. glog.V(0).Infoln("Volume", vid, "becomes writable")
  308. vl.writables = append(vl.writables, vid)
  309. return true
  310. }
// SetVolumeUnavailable drops dn as a location of vid; when the remaining
// replica count falls below the required copy count, the volume is removed
// from the writable list. Returns true only if it was removed from writables.
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) bool {
	vl.accessLock.Lock()
	defer vl.accessLock.Unlock()

	if location, ok := vl.vid2location[vid]; ok {
		if location.Remove(dn) {
			vl.readonlyVolumes.Remove(vid, dn)
			vl.oversizedVolumes.Remove(vid, dn)
			if location.Length() < vl.rp.GetCopyCount() {
				glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount())
				return vl.removeFromWritable(vid)
			}
		}
	}
	return false
}
  326. func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool {
  327. vl.accessLock.Lock()
  328. defer vl.accessLock.Unlock()
  329. vInfo, err := dn.GetVolumesById(vid)
  330. if err != nil {
  331. return false
  332. }
  333. vl.vid2location[vid].Set(dn)
  334. if vInfo.ReadOnly || isReadOnly {
  335. return false
  336. }
  337. if vl.enoughCopies(vid) {
  338. return vl.setVolumeWritable(vid)
  339. }
  340. return false
  341. }
  342. func (vl *VolumeLayout) enoughCopies(vid needle.VolumeId) bool {
  343. locations := vl.vid2location[vid].Length()
  344. desired := vl.rp.GetCopyCount()
  345. return locations == desired || (vl.replicationAsMin && locations > desired)
  346. }
  347. func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
  348. vl.accessLock.Lock()
  349. defer vl.accessLock.Unlock()
  350. // glog.V(0).Infoln("Volume", vid, "reaches full capacity.")
  351. return vl.removeFromWritable(vid)
  352. }
  353. func (vl *VolumeLayout) ToMap() map[string]interface{} {
  354. m := make(map[string]interface{})
  355. m["replication"] = vl.rp.String()
  356. m["ttl"] = vl.ttl.String()
  357. m["writables"] = vl.writables
  358. //m["locations"] = vl.vid2location
  359. return m
  360. }
  361. func (vl *VolumeLayout) Stats() *VolumeLayoutStats {
  362. vl.accessLock.RLock()
  363. defer vl.accessLock.RUnlock()
  364. ret := &VolumeLayoutStats{}
  365. freshThreshold := time.Now().Unix() - 60
  366. for vid, vll := range vl.vid2location {
  367. size, fileCount := vll.Stats(vid, freshThreshold)
  368. ret.FileCount += uint64(fileCount)
  369. ret.UsedSize += size
  370. if vl.readonlyVolumes.IsTrue(vid) {
  371. ret.TotalSize += size
  372. } else {
  373. ret.TotalSize += vl.volumeSizeLimit
  374. }
  375. }
  376. return ret
  377. }