package util

import (
	"bytes"
	"sync"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

// Creating two pools against the same registry must not fail on metric registration.
func TestNewWorkerPool_CreateMultiplePoolsWithSameRegistry(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	wp1 := NewWorkerPool("test1", 100, reg)
	defer wp1.Stop()
	wp2 := NewWorkerPool("test2", 100, reg)
	defer wp2.Stop()
}

func TestWorkerPool_FallbackMetric(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	workerPool := NewWorkerPool("test1", 1, reg)
	defer workerPool.Stop()

	// Before any job falls back, the counter should be 0.
	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
	# HELP cortex_worker_pool_fallback_total The total number additional go routines that needed to be created to run jobs.
	# TYPE cortex_worker_pool_fallback_total counter
	cortex_worker_pool_fallback_total{name="test1"} 0
`), "cortex_worker_pool_fallback_total"))

	wg := &sync.WaitGroup{}
	wg.Add(1)

	// Block the only worker with the first job.
	workerPool.Submit(func() {
		wg.Wait()
	})

	// Submit an extra job to force the fallback path and increment the metric.
	workerPool.Submit(func() {})
	require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(`
	# HELP cortex_worker_pool_fallback_total The total number additional go routines that needed to be created to run jobs.
	# TYPE cortex_worker_pool_fallback_total counter
	cortex_worker_pool_fallback_total{name="test1"} 1
`), "cortex_worker_pool_fallback_total"))

	wg.Done()
}

func TestWorkerPool_ShouldFallbackWhenAllWorkersAreBusy(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	numberOfWorkers := 10
	workerPool := NewWorkerPool("test1", numberOfWorkers, reg)
	defer workerPool.Stop()

	m := sync.Mutex{}
	blockerWg := sync.WaitGroup{}
	blockerWg.Add(numberOfWorkers)

	// Lock the mutex so every submitted job blocks.
	m.Lock()

	for i := 0; i < numberOfWorkers; i++ {
		workerPool.Submit(func() {
			defer blockerWg.Done()
			m.Lock()
			m.Unlock()
		})
	}

	// At this point all workers should be busy; submit one more job.
	wg := sync.WaitGroup{}
	wg.Add(1)
	workerPool.Submit(func() {
		defer wg.Done()
	})

	// Make sure the extra job ran to completion even though no worker was free.
	wg.Wait()

	// Release the blocked jobs.
	m.Unlock()

	blockerWg.Wait()
}
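
For reference, below is a minimal sketch of a WorkerPool that would satisfy these tests. It is an assumption, not the actual Cortex implementation: the names WorkerPool, NewWorkerPool, Submit, and Stop come from the tests above, but the channel-based internals are hypothetical. The idea it illustrates is that Submit hands a job to an idle worker via a non-blocking channel send and, when every worker is busy, falls back to spawning a dedicated goroutine while incrementing cortex_worker_pool_fallback_total.

package util

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// WorkerPool is a hypothetical sketch matching the behavior asserted above.
type WorkerPool struct {
	jobs     chan func()
	quit     chan struct{}
	fallback prometheus.Counter
}

func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) *WorkerPool {
	wp := &WorkerPool{
		jobs: make(chan func()),
		quit: make(chan struct{}),
		// The const label "name" lets multiple pools share one registry,
		// which is what the first test exercises.
		fallback: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name:        "cortex_worker_pool_fallback_total",
			Help:        "The total number additional go routines that needed to be created to run jobs.",
			ConstLabels: prometheus.Labels{"name": name},
		}),
	}
	for i := 0; i < numWorkers; i++ {
		go func() {
			for {
				select {
				case job := <-wp.jobs:
					job()
				case <-wp.quit:
					return
				}
			}
		}()
	}
	return wp
}

// Submit runs the job on an idle worker when one is available; otherwise it
// spawns a dedicated goroutine and counts the fallback.
func (wp *WorkerPool) Submit(job func()) {
	select {
	case wp.jobs <- job:
	default:
		wp.fallback.Inc()
		go job()
	}
}

func (wp *WorkerPool) Stop() {
	close(wp.quit)
}

With this design, the fallback-metric test is deterministic: a pool of size 1 has its single worker blocked by the first job, so the second Submit cannot complete the non-blocking send and must take the fallback branch before returning.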