fix: limit the different worker pools to available CPU cores. Should resolve #3189

This commit is contained in:
Gabe Kangas
2023-07-18 20:26:44 -07:00
parent b1381170c1
commit d0376cdc75
4 changed files with 14 additions and 16 deletions

View File

@@ -51,7 +51,7 @@ func TestMain(m *testing.M) {
// this test ensures that `SendToWebhooks` without a `WaitGroup` doesn't panic.
func TestPublicSend(t *testing.T) {
// Send enough events to be sure at least one worker delivers a second event.
const eventsCount = webhookWorkerPoolSize + 1
eventsCount := webhookWorkerPoolSize + 1
var wg sync.WaitGroup
wg.Add(eventsCount)
@@ -267,7 +267,7 @@ func TestParallel(t *testing.T) {
myId := atomic.AddUint32(&calls, 1)
// We made it to the pool size + 1 event, so we're done with the test.
if myId == webhookWorkerPoolSize+1 {
if myId == uint32(webhookWorkerPoolSize)+1 {
close(finished)
return
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"net/http"
"runtime"
"sync"
log "github.com/sirupsen/logrus"
@@ -12,10 +13,8 @@ import (
"github.com/owncast/owncast/models"
)
const (
// webhookWorkerPoolSize defines the number of concurrent HTTP webhook requests.
webhookWorkerPoolSize = 10
)
// webhookWorkerPoolSize defines the number of concurrent HTTP webhook requests,
// capped at the number of available CPU cores (GOMAXPROCS).
var webhookWorkerPoolSize = runtime.GOMAXPROCS(0)
// Job struct bundling the webhook and the payload in one struct.
type Job struct {