From ea008b9238474185b7ab1c39f567b1f3c0bfd891 Mon Sep 17 00:00:00 2001
From: Aurelien FOUCRET
Date: Mon, 11 Mar 2024 16:21:11 +0100
Subject: [PATCH] Apply suggestions from review.

---
 .../learning-to-rank-model-training.asciidoc                | 6 ++++--
 .../search-your-data/learning-to-rank-search-usage.asciidoc | 2 +-
 .../search/search-your-data/learning-to-rank.asciidoc       | 4 ++--
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc
index 6122aad7e1699..fb026578bc00d 100644
--- a/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc
+++ b/docs/reference/search/search-your-data/learning-to-rank-model-training.asciidoc
@@ -45,7 +45,8 @@ feature_extractors=[
         feature_name="title_bm25",
         query={"match": {"title": "{{query}}"}}
     ),
-    # We can use a script_score query to get the value of the field rating directly as a feature:
+    # We can use a script_score query to get the value
+    # of the field rating directly as a feature:
     QueryFeatureExtractor(
         feature_name="popularity",
         query={
@@ -55,7 +56,8 @@ feature_extractors=[
             }
         },
     ),
-    # We can execute a script on the value of the query and use the return value as a feature:
+    # We can execute a script on the value of the query
+    # and use the return value as a feature:
     QueryFeatureExtractor(
         feature_name="query_length",
         query={
diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc
index e4497bd3211c1..1d040a116ad9a 100644
--- a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc
+++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc
@@ -13,7 +13,7 @@ For more information, see {subscriptions}.
 
 [[learning-to-rank-rescorer]]
 ==== Learning To Rank as a rescorer
-Once your LTR model is trained and deployed in {es}, it can be used as a <> in the <>:
+Once your LTR model is trained and deployed in {es}, it can be used as a <> in the <>:
 
 [source,console]
 ----
diff --git a/docs/reference/search/search-your-data/learning-to-rank.asciidoc b/docs/reference/search/search-your-data/learning-to-rank.asciidoc
index 8af8a56e77526..08fad9db9c0f6 100644
--- a/docs/reference/search/search-your-data/learning-to-rank.asciidoc
+++ b/docs/reference/search/search-your-data/learning-to-rank.asciidoc
@@ -109,11 +109,11 @@ The heart of LTR is of course an ML model. A model is trained using the training
 
 The LTR space is evolving rapidly and many approaches and model types are being
 experimented with. In practice {es} relies specifically on gradient boosted decision tree
-(GBDT) models for LTR inference.
+(https://en.wikipedia.org/wiki/Gradient_boosting#Gradient_tree_boosting[GBDT^]) models for LTR inference.
 
 Note that {es} supports model inference but the training process itself must
 happen outside of {es}, using a GBDT model. Among the most popular LTR models
-used today, LambdaMART provides strong ranking performance with low inference
+used today, https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf[LambdaMART^] provides strong ranking performance with low inference
 latencies. It relies on GBDT models and is therefore a perfect fit for LTR in
 {es}.
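
For context on the first two hunks: the re-wrapped comments belong to the eland feature-extractor list used in the model-training guide. The sketch below shows one way such a list might be assembled around those fragments; the `rating` field, the Painless script sources, and the `LTRModelConfig` wrapper are illustrative assumptions and are not taken verbatim from the patched page.

[source,python]
----
# A minimal sketch (not part of the patch) of the eland feature extractors
# that the re-wrapped comments above refer to. Field names and script
# bodies are assumptions for illustration only.
from eland.ml.ltr import LTRModelConfig, QueryFeatureExtractor

ltr_config = LTRModelConfig(
    feature_extractors=[
        # BM25 score of a match query on the title field:
        QueryFeatureExtractor(
            feature_name="title_bm25",
            query={"match": {"title": "{{query}}"}},
        ),
        # We can use a script_score query to get the value
        # of the field rating directly as a feature:
        QueryFeatureExtractor(
            feature_name="popularity",
            query={
                "script_score": {
                    "query": {"exists": {"field": "rating"}},
                    "script": {"source": "return doc['rating'].value;"},
                }
            },
        ),
        # We can execute a script on the value of the query
        # and use the return value as a feature:
        QueryFeatureExtractor(
            feature_name="query_length",
            query={
                "script_score": {
                    "query": {"match_all": {}},
                    "script": {
                        "source": "return params['query'].splitOnToken(' ').length;",
                        "params": {"query": "{{query}}"},
                    },
                }
            },
        ),
    ]
)
----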