Skip to content
This repository has been archived by the owner on Nov 20, 2024. It is now read-only.

Commit

Permalink
api: pull_requests: add pagination
Browse files Browse the repository at this point in the history
This adds [Github-like pagination][1] to the `/api/pull_requests` API
call.

The following rules are applied:
- Pages start counting at 1.
- A page can be selected via the `page` query argument, the number of
  entries can be set via the `per_page` query argument.
- The ["Link" HTTP-header][2] provides links to appropriate pages to
  navigate to (`next`, `last`, `first`, `prev`)
- Building and queued jobs (collectively called *"pending jobs"* in the
  following) are always on the first page.
- If the number of pending jobs is less than `per_page`, a subset of the
  finished jobs is shown. Their number is determined by the difference
  between `per_page` and the number of pending jobs.
- If the number of pending jobs is greater than or equal to `per_page`,
  finished jobs start on the second page.

[1]: https://developer.github.com/v3/#pagination
[2]: https://tools.ietf.org/html/rfc8288
  • Loading branch information
miri64 committed Jul 30, 2018
1 parent 66ae880 commit 6f12190
Showing 1 changed file with 77 additions and 4 deletions.
81 changes: 77 additions & 4 deletions github_webhook.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import tornado.websocket
import json
import os
import urllib.parse

from log import log

Expand All @@ -12,6 +13,56 @@

config.set_default("url_prefix", r"")


class Pagination(object):
    """Github-like pagination helper for a Tornado request handler.

    Reads the ``page`` and ``per_page`` query arguments from the request
    and builds an RFC 8288 "Link" response header with ``first``,
    ``prev``, ``next`` and ``last`` relations.
    """

    DEFAULT_PER_PAGE = 100
    DEFAULT_PAGE = 1

    def __init__(self, request):
        # Tornado's get_argument() returns the *string* value of a query
        # argument when the client supplies one; coerce to int so the
        # comparisons and arithmetic in set_header() work (the original
        # compared str to int, a TypeError in Python 3). Malformed values
        # fall back to the defaults.
        try:
            self.page = int(request.get_argument("page",
                                                 Pagination.DEFAULT_PAGE))
        except (TypeError, ValueError):
            self.page = Pagination.DEFAULT_PAGE
        try:
            self.per_page = int(
                request.get_argument("per_page",
                                     Pagination.DEFAULT_PER_PAGE))
        except (TypeError, ValueError):
            self.per_page = Pagination.DEFAULT_PER_PAGE
        # Clamp to sane values: page >= 1 (pages start at 1 by contract),
        # per_page >= 1 (avoids ZeroDivisionError in set_header()).
        if self.page < 1:
            self.page = Pagination.DEFAULT_PAGE
        if self.per_page < 1:
            self.per_page = Pagination.DEFAULT_PER_PAGE
        # Template for one Link-header entry, e.g.
        # <http://host/path?page=2&per_page=100>; rel="next"
        self.rels_template = "<%s://%s%s?{qs}>; rel=\"{rel}\"" % \
            (request.protocol, request.host, request.path)
        self.request = request

    def _get_rel(self, rel_name, rel_page):
        """Return the format() arguments for one Link-header relation."""
        return {
            "rel": rel_name,
            "qs": urllib.parse.urlencode({
                "page": rel_page,
                "per_page": self.per_page
            })
        }

    def set_header(self, objects_num=0):
        """Set the "Link" header for a collection of ``objects_num`` items.

        The current page is clamped to the last page if it is out of
        range. ``first``/``prev`` relations are only emitted when not on
        the first page, ``next`` only when not on the last; ``last`` is
        always present.
        """
        if objects_num <= self.per_page:
            pages = 1
        else:
            # Ceiling division: a partially filled page still counts.
            pages = (objects_num // self.per_page)
            if (objects_num % self.per_page):
                pages += 1
        if self.page > pages:
            self.page = pages
        rels = []
        if self.page > 1:
            rels.extend([
                self.rels_template.format(**self._get_rel("first", 1)),
                self.rels_template.format(**self._get_rel("prev",
                                                          self.page - 1))
            ])
        if (self.page < pages):
            rels.append(
                self.rels_template.format(**self._get_rel("next",
                                                          self.page + 1))
            )
        rels.append(
            self.rels_template.format(**self._get_rel("last",
                                                      pages))
        )
        self.request.set_header("Link", ", ".join(rels))


class GithubWebhook(object):
def __init__(s, port, prs, github_handlers):

Expand Down Expand Up @@ -53,31 +104,53 @@ def gen_pull_entry(pr, job, time, extra = None):
})
return res

pagination = Pagination(self)
self.set_header("Content-Type", 'application/json; charset="utf-8"')
self.set_header("Access-Control-Allow-Credentials", "false")
self.set_header("Access-Control-Allow-Origin", "*")

building, queued, finished = self.prs.list()
response = {}

if building:
building_num = len(building)
queued_num = len(building)
pending_num = building_num + queued_num
finished_num = len(finished)
page_1_finished = 0 # number of finished jobs on page 1
if (pending_num < pagination.per_page):
page_1_finished = pagination.per_page - pending_num
pagination.set_header((finished_num - page_1_finished) +
# put pending always on first page regardless
# of actual number
(self.per_page if pending_num else 0))

if (pagination.page == 1) and (building_num > 0):
_building = []
for pr, job in building:
_building.append(
gen_pull_entry(pr, job, job.time_started))
response['building'] = _building

if queued:
if (pagination.page == 1) and (queued_num > 0):
_queued = []
for pr, job in queued:
_queued.append(
gen_pull_entry(pr, job, job.time_queued))

response['queued'] = _queued

if finished:
if (finished_num > 0):
_finished = []
for pr, job in finished:
if (pagination.page > 1):
first = ((pagination.page - 1) * pagination.per_page) + \
page_1_finished
elif (page_1_finished > 0):
first = 0
else:
# we are on first page and finished jobs don't fit
# => skip loop
finished = []
for pr, job in finished[first:(first + pagination.per_page)]:
status_html_snipfile = os.path.join(config.data_dir,
pr.base_full_name, str(pr.nr), job.arg, "prstatus.html.snip")

Expand Down

0 comments on commit 6f12190

Please sign in to comment.