package resource_pool

import (
    "errors"
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

type idleHandle struct {
    handle    interface{}
    keepUntil *time.Time
}

type TooManyHandles struct {
    location string
}

func (t TooManyHandles) Error() string {
    return fmt.Sprintf("Too many handles to %s", t.location)
}

type OpenHandleError struct {
    location string
    err      error
}

func (o OpenHandleError) Error() string {
    return fmt.Sprintf("Failed to open resource handle: %s (%v)", o.location, o.err)
}
// A resource pool implementation where all handles are associated to the
// same resource location.
type simpleResourcePool struct {
    options Options

    numActive *int32 // atomic counter

    activeHighWaterMark *int32 // atomic / monotonically increasing value

    openTokens Semaphore

    mutex       sync.Mutex
    location    string        // guarded by mutex
    idleHandles []*idleHandle // guarded by mutex
    isLameDuck  bool          // guarded by mutex
}
// This returns a SimpleResourcePool, where all handles are associated to a
// single resource location.
func NewSimpleResourcePool(options Options) ResourcePool {
    numActive := new(int32)
    atomic.StoreInt32(numActive, 0)

    activeHighWaterMark := new(int32)
    atomic.StoreInt32(activeHighWaterMark, 0)

    var tokens Semaphore
    if options.OpenMaxConcurrency > 0 {
        tokens = NewBoundedSemaphore(uint(options.OpenMaxConcurrency))
    }

    return &simpleResourcePool{
        location:            "",
        options:             options,
        numActive:           numActive,
        activeHighWaterMark: activeHighWaterMark,
        openTokens:          tokens,
        mutex:               sync.Mutex{},
        idleHandles:         make([]*idleHandle, 0, 0),
        isLameDuck:          false,
    }
}
// See ResourcePool for documentation.
func (p *simpleResourcePool) NumActive() int32 {
    return atomic.LoadInt32(p.numActive)
}

// See ResourcePool for documentation.
func (p *simpleResourcePool) ActiveHighWaterMark() int32 {
    return atomic.LoadInt32(p.activeHighWaterMark)
}

// See ResourcePool for documentation.
func (p *simpleResourcePool) NumIdle() int {
    p.mutex.Lock()
    defer p.mutex.Unlock()
    return len(p.idleHandles)
}
// SimpleResourcePool can only register a single (network, address) entry.
// Register should be called before any Get calls.
func (p *simpleResourcePool) Register(resourceLocation string) error {
    if resourceLocation == "" {
        return errors.New("Invalid resource location")
    }

    p.mutex.Lock()
    defer p.mutex.Unlock()

    if p.isLameDuck {
        return fmt.Errorf(
            "cannot register %s to lame duck resource pool",
            resourceLocation)
    }

    if p.location == "" {
        p.location = resourceLocation
        return nil
    }

    return errors.New("SimpleResourcePool can only register one location")
}
// SimpleResourcePool will enter lame duck mode upon calling Unregister.
func (p *simpleResourcePool) Unregister(resourceLocation string) error {
    p.EnterLameDuckMode()
    return nil
}

func (p *simpleResourcePool) ListRegistered() []string {
    p.mutex.Lock()
    defer p.mutex.Unlock()

    if p.location != "" {
        return []string{p.location}
    }

    return []string{}
}
func (p *simpleResourcePool) getLocation() (string, error) {
    p.mutex.Lock()
    defer p.mutex.Unlock()

    if p.location == "" {
        return "", fmt.Errorf(
            "resource location is not set for SimpleResourcePool")
    }

    if p.isLameDuck {
        return "", fmt.Errorf(
            "lame duck resource pool cannot return handles to %s",
            p.location)
    }

    return p.location, nil
}
// This gets an active resource from the resource pool. Note that the
// resourceLocation argument is ignored (the handles are associated to the
// resource location provided by the first Register call).
func (p *simpleResourcePool) Get(unused string) (ManagedHandle, error) {
    activeCount := atomic.AddInt32(p.numActive, 1)
    if p.options.MaxActiveHandles > 0 &&
        activeCount > p.options.MaxActiveHandles {

        atomic.AddInt32(p.numActive, -1)
        return nil, TooManyHandles{p.location}
    }

    highest := atomic.LoadInt32(p.activeHighWaterMark)
    for activeCount > highest &&
        !atomic.CompareAndSwapInt32(
            p.activeHighWaterMark,
            highest,
            activeCount) {

        highest = atomic.LoadInt32(p.activeHighWaterMark)
    }

    if h := p.getIdleHandle(); h != nil {
        return h, nil
    }

    location, err := p.getLocation()
    if err != nil {
        atomic.AddInt32(p.numActive, -1)
        return nil, err
    }

    if p.openTokens != nil {
        // Current implementation does not wait for tokens to become available.
        // If that causes availability hits, we could increase the wait,
        // similar to simple_pool.go.
        if p.openTokens.TryAcquire(0) {
            defer p.openTokens.Release()
        } else {
            // We could not immediately acquire a token; instead of waiting,
            // fail fast so the caller can retry or back off.
            atomic.AddInt32(p.numActive, -1)
            return nil, OpenHandleError{
                p.location, errors.New("Open Error: reached OpenMaxConcurrency")}
        }
    }

    handle, err := p.options.Open(location)
    if err != nil {
        atomic.AddInt32(p.numActive, -1)
        return nil, OpenHandleError{p.location, err}
    }

    return NewManagedHandle(p.location, handle, p, p.options), nil
}
// See ResourcePool for documentation.
func (p *simpleResourcePool) Release(handle ManagedHandle) error {
    if pool, ok := handle.Owner().(*simpleResourcePool); !ok || pool != p {
        return errors.New(
            "Resource pool cannot take control of a handle owned " +
                "by another resource pool")
    }

    h := handle.ReleaseUnderlyingHandle()
    if h != nil {
        // We can unref either before or after queuing the idle handle.
        // The advantage of unref-ing before queuing is that there is a
        // higher chance of a successful Get when the number of active handles
        // is close to the limit (but potentially more handle creation).
        // The advantage of queuing before unref-ing is that there's a
        // higher chance of reusing the handle (but potentially more Get failures).
        atomic.AddInt32(p.numActive, -1)
        p.queueIdleHandles(h)
    }

    return nil
}
// See ResourcePool for documentation.
func (p *simpleResourcePool) Discard(handle ManagedHandle) error {
    if pool, ok := handle.Owner().(*simpleResourcePool); !ok || pool != p {
        return errors.New(
            "Resource pool cannot take control of a handle owned " +
                "by another resource pool")
    }

    h := handle.ReleaseUnderlyingHandle()
    if h != nil {
        atomic.AddInt32(p.numActive, -1)
        if err := p.options.Close(h); err != nil {
            return fmt.Errorf("failed to close resource handle: %v", err)
        }
    }

    return nil
}
// See ResourcePool for documentation.
func (p *simpleResourcePool) EnterLameDuckMode() {
    p.mutex.Lock()

    toClose := p.idleHandles
    p.isLameDuck = true
    p.idleHandles = []*idleHandle{}

    p.mutex.Unlock()

    p.closeHandles(toClose)
}
// This returns an idle resource, if there is one.
func (p *simpleResourcePool) getIdleHandle() ManagedHandle {
    var toClose []*idleHandle
    defer func() {
        // NOTE: Must keep the closure around to late bind the toClose slice.
        p.closeHandles(toClose)
    }()

    now := p.options.getCurrentTime()

    p.mutex.Lock()
    defer p.mutex.Unlock()

    var i int
    for i = 0; i < len(p.idleHandles); i++ {
        idle := p.idleHandles[i]
        if idle.keepUntil == nil || now.Before(*idle.keepUntil) {
            break
        }
    }
    if i > 0 {
        toClose = p.idleHandles[0:i]
    }
    if i < len(p.idleHandles) {
        idle := p.idleHandles[i]
        p.idleHandles = p.idleHandles[i+1:]
        return NewManagedHandle(p.location, idle.handle, p, p.options)
    }
    if len(p.idleHandles) > 0 {
        p.idleHandles = []*idleHandle{}
    }

    return nil
}
// This adds an idle resource to the pool.
func (p *simpleResourcePool) queueIdleHandles(handle interface{}) {
    var toClose []*idleHandle
    defer func() {
        // NOTE: Must keep the closure around to late bind the toClose slice.
        p.closeHandles(toClose)
    }()

    now := p.options.getCurrentTime()
    var keepUntil *time.Time
    if p.options.MaxIdleTime != nil {
        // NOTE: Assign to temp variable first to work around compiler bug
        x := now.Add(*p.options.MaxIdleTime)
        keepUntil = &x
    }

    p.mutex.Lock()
    defer p.mutex.Unlock()

    if p.isLameDuck {
        toClose = []*idleHandle{
            {handle: handle},
        }
        return
    }

    p.idleHandles = append(
        p.idleHandles,
        &idleHandle{
            handle:    handle,
            keepUntil: keepUntil,
        })

    nIdleHandles := uint32(len(p.idleHandles))
    if nIdleHandles > p.options.MaxIdleHandles {
        handlesToClose := nIdleHandles - p.options.MaxIdleHandles
        toClose = p.idleHandles[0:handlesToClose]
        p.idleHandles = p.idleHandles[handlesToClose:nIdleHandles]
    }
}
// Closes resources; at this point it is assumed that these resources
// are no longer referenced from the main idleHandles slice.
func (p *simpleResourcePool) closeHandles(handles []*idleHandle) {
    for _, handle := range handles {
        _ = p.options.Close(handle.handle)
    }
}
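
// Usage sketch (illustrative, not part of the original file). The Options
// field names below (MaxActiveHandles, MaxIdleHandles, OpenMaxConcurrency,
// Open, Close) are taken from how this file uses them, but their exact
// signatures and the dial/close logic are assumptions; adjust to the actual
// Options and ManagedHandle definitions in this package.
//
//     pool := NewSimpleResourcePool(Options{
//         MaxActiveHandles:   10,
//         MaxIdleHandles:     5,
//         OpenMaxConcurrency: 4,
//         Open: func(location string) (interface{}, error) {
//             // Open a new underlying resource, e.g. a network connection.
//             return net.Dial("tcp", location)
//         },
//         Close: func(handle interface{}) error {
//             return handle.(net.Conn).Close()
//         },
//     })
//
//     // Register exactly one location before calling Get.
//     if err := pool.Register("localhost:6379"); err != nil {
//         // handle error
//     }
//
//     // The argument to Get is ignored; the registered location is used.
//     handle, err := pool.Get("")
//     if err != nil {
//         // handle error
//     }
//     // ... use the underlying resource via the ManagedHandle ...
//     _ = pool.Release(handle) // or pool.Discard(handle) if the resource is broken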