-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdequeue_test.go
116 lines (95 loc) · 3.13 KB
/
dequeue_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
package sqlq_test
import (
"database/sql"
"sync"
"testing"
"time"
. "github.com/mergestat/sqlq"
"github.com/pkg/errors"
)
// setConcurrencyAndPriority updates the concurrency limit (c) and scheduling
// priority (p) of the named queue in the sqlq.queues table.
//
// It returns the UPDATE's error if the statement fails, or an error if no
// queue row with the given name exists.
func setConcurrencyAndPriority(db *sql.DB, queue Queue, c, p int) error {
	result, err := db.Exec("UPDATE sqlq.queues SET concurrency = $2, priority = $3 WHERE queues.name = $1", queue, c, p)
	if err != nil {
		// must bail out before touching result: on error result may be nil,
		// and calling RowsAffected() on it would panic
		return err
	}
	if affected, _ := result.RowsAffected(); affected == 0 {
		return errors.Errorf("no queue with name = %q", queue)
	}
	return nil
}
// TestDequeue verifies that Dequeue() honors per-queue priority and
// concurrency: the first job must come from the higher-priority "critical"
// queue, the next two from "default", and once every queue is saturated a
// further Dequeue() must return no job.
func TestDequeue(t *testing.T) {
	var upstream = MustOpen(PostgresUrl)
	defer upstream.Close()
	var queues = []Queue{"critical", "default"}
	// seed ten jobs, alternating between the two queues
	for n := 0; n < 10; n++ {
		if _, err := Enqueue(upstream, queues[n%2], NewAdditionJob(100, 200)); err != nil {
			t.Fatalf("failed to enqueue job: %v", err)
		}
	}
	// critical: at most 1 concurrent job, highest priority (0)
	if err := setConcurrencyAndPriority(upstream, "critical", 1, 0); err != nil {
		t.Fatalf("failed to update queue concurrency: %v", err)
	}
	// default: at most 2 concurrent jobs, lower priority (1)
	if err := setConcurrencyAndPriority(upstream, "default", 2, 1); err != nil {
		t.Fatalf("failed to update queue concurrency: %v", err)
	}
	for attempt := 1; attempt <= 3; attempt++ {
		job, err := Dequeue(upstream, queues)
		if err != nil || job == nil {
			t.Fatalf("failed to dequeue: %#v", err) // shouldn't error out and must return a job
		}
		t.Logf("dequeued: %#v", job)
		switch {
		case attempt == 1 && job.Queue != "critical":
			// the very first dequeue MUST pull from the critical queue
			t.Fatalf("expected first job to be from 'critical' queue: got=%q", job.Queue)
		case attempt > 1 && job.Queue != "default":
			// the 2nd and 3rd dequeues MUST pull from the default queue
			t.Fatalf("expected job to be from 'default' queue: got=%q", job.Queue)
		}
	}
	// since all queues are running at max concurrency, next Dequeue() should return no job
	if job, err := Dequeue(upstream, queues); err != nil || job != nil {
		t.FailNow()
	}
}
// TestDequeueConcurrent races five goroutines calling Dequeue() against a
// queue whose concurrency limit is 2, and asserts that no more than two of
// them actually receive a job.
func TestDequeueConcurrent(t *testing.T) {
	var upstream = MustOpen(PostgresUrl)
	defer upstream.Close()
	const queue Queue = "compute-intensive"
	// seed more jobs than the concurrency limit permits to run at once
	for n := 0; n < 5; n++ {
		if _, err := Enqueue(upstream, queue, NewAdditionJob(100, 200)); err != nil {
			t.Fatalf("failed to enqueue job: %v", err)
		}
	}
	if err := setConcurrencyAndPriority(upstream, queue, 2, 1); err != nil {
		t.Fatalf("failed to update queue concurrency: %v", err)
	}
	var wg sync.WaitGroup
	var result = make(chan *Job, 2)
	for n := 0; n < 5; n++ {
		// this is as close to "real-time" race-condition as we can get in an automated test scenario
		wg.Add(1)
		go func() {
			defer wg.Done()
			time.Sleep(time.Second * 1)
			if job, err := Dequeue(upstream, []Queue{queue}); err != nil {
				t.Errorf("failed to dequeue: %#v", err)
			} else {
				result <- job
			}
		}()
	}
	// close result channel after all results are written to it
	go func() { wg.Wait(); close(result) }()
	var allowed = 2
	for job := range result {
		switch {
		case job == nil:
			// nil means this dequeue was (correctly) throttled — nothing to count
		case allowed == 0:
			t.Errorf("dequeued more jobs than allowed by concurrency")
		default:
			allowed -= 1 // decrease count of allowed jobs
		}
	}
	// wait for all Dequeue() to finish so that we do not close the database connection while they are running.
	// doing so leaves un-related error messages in the test's logs.
	wg.Wait()
}