diff --git a/README.md b/README.md
index 7130fade..3c05eb25 100644
--- a/README.md
+++ b/README.md
@@ -89,6 +89,7 @@ Access the generated M3U playlist at `http://:8080/playlist.m3u`.
 |-----------------------------|----------------------------------------------------------|---------------|------------------------------------------------|
 | M3U_URL_1, M3U_URL_2, M3U_URL_X | Set M3U URLs as environment variables. | N/A | Any valid M3U URLs |
 | M3U_MAX_CONCURRENCY_1, M3U_MAX_CONCURRENCY_2, M3U_MAX_CONCURRENCY_X | Set max concurrency. | 1 | Any integer |
+| MAX_RETRIES | Set the maximum number of retries (laps) across all M3Us while streaming. Set to 0 to never stop retrying (beware of throttling from the provider). | 5 | Any integer greater than or equal to 0 |
 | REDIS_ADDR | Set Redis server address | N/A | e.g. localhost:6379 |
 | REDIS_PASS | Set Redis server password | N/A | Any string |
 | REDIS_DB | Set Redis server database to be used | 0 | 0 to 15 |
diff --git a/stream_handler.go b/stream_handler.go
index 8f2e820a..2fdf07bc 100644
--- a/stream_handler.go
+++ b/stream_handler.go
@@ -15,7 +15,7 @@ import (
 	"strings"
 )
 
-func loadBalancer(stream database.StreamInfo, previous []int) (*http.Response, string, int, error) {
+func loadBalancer(stream database.StreamInfo, previous *[]int) (*http.Response, string, int, error) {
 	debug := os.Getenv("DEBUG") == "true"
 
 	m3uIndexes := utils.GetM3UIndexes()
@@ -24,14 +24,22 @@ func loadBalancer(stream database.StreamInfo, previous []int) (*http.Response, s
 		return db.ConcurrencyPriorityValue(i) > db.ConcurrencyPriorityValue(j)
 	})
 
-	const maxLaps = 5
+	maxLapsString := os.Getenv("MAX_RETRIES")
+	maxLaps, err := strconv.Atoi(strings.TrimSpace(maxLapsString))
+	if err != nil || maxLaps < 0 {
+		maxLaps = 5
+	}
+
 	lap := 0
 
-	for lap < maxLaps {
+	for lap < maxLaps || maxLaps == 0 {
+		if debug {
+			log.Printf("[DEBUG] Stream attempt %d out of %d\n", lap+1, maxLaps)
+		}
 		allSkipped := true // Assume all URLs might be skipped
 
 		for _, index := range m3uIndexes {
-			if slices.Contains(previous, index) {
+			if slices.Contains(*previous, index) {
 				log.Printf("Skipping M3U_%d: marked as previous stream\n", index+1)
 				continue
 			}
@@ -66,7 +74,7 @@ func loadBalancer(stream database.StreamInfo, previous []int) (*http.Response, s
 			if debug {
 				log.Printf("[DEBUG] All streams skipped in lap %d\n", lap)
 			}
-			break
+			*previous = []int{}
 		}
 
 		lap++
@@ -146,6 +154,7 @@ func streamHandler(w http.ResponseWriter, r *http.Request, db *database.Instance
 	var selectedUrl string
 
 	testedIndexes := []int{}
+	firstWrite := true
 
 	var resp *http.Response
 
@@ -158,15 +167,14 @@ func streamHandler(w http.ResponseWriter, r *http.Request, db *database.Instance
 			}
 			return
 		default:
-			resp, selectedUrl, selectedIndex, err = loadBalancer(stream, testedIndexes)
+			resp, selectedUrl, selectedIndex, err = loadBalancer(stream, &testedIndexes)
 			if err != nil {
 				log.Printf("Error reloading stream for %s: %v\n", streamSlug, err)
-				http.Error(w, "Error fetching stream. Exhausted all streams.", http.StatusInternalServerError)
 				return
 			}
 
 			// HTTP header initialization
-			if len(testedIndexes) == 0 {
+			if firstWrite {
 				w.Header().Set("Cache-Control", "no-cache")
 				w.Header().Set("Access-Control-Allow-Origin", "*")
 				for k, v := range resp.Header {
@@ -179,6 +187,7 @@ func streamHandler(w http.ResponseWriter, r *http.Request, db *database.Instance
 				if debug {
 					log.Printf("[DEBUG] Headers set for response: %v\n", w.Header())
 				}
+				firstWrite = false
 			}
 
 			exitStatus := make(chan int)
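
For readers skimming the patch, the new `MAX_RETRIES` handling boils down to one rule: a missing, non-numeric, or negative value falls back to 5 laps, while 0 keeps the `for lap < maxLaps || maxLaps == 0` loop running indefinitely. The snippet below is a minimal, self-contained sketch of that parsing rule only; the `maxRetries` helper and its demo `main` are illustrative stand-ins and not part of the repository.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// maxRetries mirrors the fallback rule introduced in loadBalancer:
// unset, non-numeric, or negative MAX_RETRIES values fall back to 5,
// while 0 is passed through and means "never stop retrying".
func maxRetries() int {
	n, err := strconv.Atoi(strings.TrimSpace(os.Getenv("MAX_RETRIES")))
	if err != nil || n < 0 {
		return 5
	}
	return n
}

func main() {
	// Illustrative values only: unset/garbage/negative -> 5, zero -> unlimited.
	for _, v := range []string{"", "abc", "-3", "0", "10"} {
		os.Setenv("MAX_RETRIES", v)
		fmt.Printf("MAX_RETRIES=%q -> %d lap(s)\n", v, maxRetries())
	}
}
```

Setting `MAX_RETRIES=0` therefore trades a guaranteed stopping point for indefinite retries against the upstream sources, which is why the README entry warns about provider throttling.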