diff --git a/.buildinfo b/.buildinfo
new file mode 100644
index 00000000..df2eb087
--- /dev/null
+++ b/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 2eae85351f1730f4826a6f37437c80cd
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/_modules/dicee/abstracts.html b/_modules/dicee/abstracts.html
new file mode 100644
index 00000000..e7829e48
--- /dev/null
+++ b/_modules/dicee/abstracts.html
@@ -0,0 +1,814 @@
Source code for dicee.abstracts

+import os
+import datetime
+from .static_funcs import load_model_ensemble, load_model, save_checkpoint_model
+import torch
+from typing import List, Tuple, Set
+import random
+from abc import ABC
+import pytorch_lightning
+
+
+
+class AbstractTrainer:
+    """
+    Abstract base class for trainers of knowledge graph embedding models.
+
+    Parameter
+    ---------
+    args
+        A configuration object (e.g., an argparse.Namespace) holding the training attributes.
+
+    callbacks: list
+        Callback instances whose hooks are invoked during training.
+    """
+
+    def __init__(self, args, callbacks):
+        self.attributes = args
+        self.callbacks = callbacks
+        self.is_global_zero = True
+        # Set True to use the model summary callback of pytorch_lightning.
+        torch.manual_seed(self.attributes.random_seed)
+        torch.cuda.manual_seed_all(self.attributes.random_seed)
+
+[docs] + def on_fit_start(self, *args, **kwargs): + """ + A function to call callbacks before the training starts. + + Parameter + --------- + args + + kwargs + + + Returns + ------- + None + """ + for c in self.callbacks: + c.on_fit_start(*args, **kwargs)
+ + +
+    def on_fit_end(self, *args, **kwargs):
+        """
+        A function to call callbacks at the end of the training.
+
+        Parameter
+        ---------
+        args
+
+        kwargs
+
+        Returns
+        -------
+        None
+        """
+        for c in self.callbacks:
+            c.on_fit_end(*args, **kwargs)
+ + +
+[docs] + def on_train_epoch_end(self, *args, **kwargs): + """ + A function to call callbacks at the end of an epoch. + + Parameter + --------- + args + + kwargs + + + Returns + ------- + None + """ + for c in self.callbacks: + c.on_train_epoch_end(*args, **kwargs)
+ + +
+[docs] + def on_train_batch_end(self, *args, **kwargs): + """ + A function to call callbacks at the end of each mini-batch during training. + + Parameter + --------- + args + + kwargs + + + Returns + ------- + None + """ + for c in self.callbacks: + c.on_train_batch_end(*args, **kwargs)
+ + +
+[docs] + @staticmethod + def save_checkpoint(full_path: str, model) -> None: + """ + A static function to save a model into disk + + Parameter + --------- + full_path : str + + model: + + + Returns + ------- + None + """ + torch.save(model.state_dict(), full_path)
+
+ + + +
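Concrete trainers drive the optimization loop and fan each lifecycle hook out to every registered callback, as the methods above show. A minimal sketch of that control flow (ConcreteTrainer and DummyCallback are illustrative names, not dicee classes):

    class DummyCallback:
        def on_fit_start(self, trainer, model): print("fit started")
        def on_train_epoch_end(self, trainer, model): print("epoch done")
        def on_fit_end(self, trainer, model): print("fit finished")

    # trainer = ConcreteTrainer(args, callbacks=[DummyCallback()])
    # trainer.on_fit_start(trainer, model)            # before the first epoch
    # for epoch in range(args.num_epochs):
    #     ...                                         # mini-batch updates, on_train_batch_end(...)
    #     trainer.on_train_epoch_end(trainer, model)
    # trainer.on_fit_end(trainer, model)              # after the last epoch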
+class BaseInteractiveKGE:
+    """
+    Abstract/base class for using knowledge graph embedding models interactively.
+
+    Parameter
+    ---------
+    path : str
+        Path of a directory containing a pretrained model and its indexes.
+
+    construct_ensemble: boolean
+        Whether to load an ensemble of checkpoints instead of a single model.
+
+    model_name: str
+        Name of the model checkpoint to load.
+
+    apply_semantic_constraint : boolean
+        Whether to apply domain/range constraints of relations at prediction time.
+    """
+
+    def __init__(self, path: str, construct_ensemble: bool = False, model_name: str = None,
+                 apply_semantic_constraint: bool = False):
+        try:
+            assert os.path.isdir(path)
+        except AssertionError:
+            raise AssertionError(f'Could not find a directory {path}')
+        self.path = path
+        # (1) Load model...
+        self.construct_ensemble = construct_ensemble
+        self.apply_semantic_constraint = apply_semantic_constraint
+        if construct_ensemble:
+            self.model, self.entity_to_idx, self.relation_to_idx = load_model_ensemble(self.path)
+        else:
+            if model_name:
+                self.model, self.entity_to_idx, self.relation_to_idx = load_model(self.path, model_name=model_name)
+            else:
+                self.model, self.entity_to_idx, self.relation_to_idx = load_model(self.path)
+        self.num_entities = len(self.entity_to_idx)
+        self.num_relations = len(self.relation_to_idx)
+        self.entity_to_idx: dict
+        self.relation_to_idx: dict
+        assert list(self.entity_to_idx.values()) == list(range(0, len(self.entity_to_idx)))
+        assert list(self.relation_to_idx.values()) == list(range(0, len(self.relation_to_idx)))
+
+        self.idx_to_entity = {v: k for k, v in self.entity_to_idx.items()}
+        self.idx_to_relations = {v: k for k, v in self.relation_to_idx.items()}
+
+    def get_domain_of_relation(self, rel: str) -> Set[str]:
+        x = [self.idx_to_entity[i] for i in self.domain_per_rel[self.relation_to_idx[rel]]]
+        res = set(x)
+        assert len(x) == len(res)
+        return res
+ + +
+    def get_range_of_relation(self, rel: str) -> Set[str]:
+        x = [self.idx_to_entity[i] for i in self.range_per_rel[self.relation_to_idx[rel]]]
+        res = set(x)
+        assert len(x) == len(res)
+        return res
+ + +
+[docs] + def set_model_train_mode(self) -> None: + """ + Setting the model into training mode + + + Parameter + --------- + + Returns + --------- + """ + self.model.train() + for parameter in self.model.parameters(): + parameter.requires_grad = True
+ + +
+[docs] + def set_model_eval_mode(self) -> None: + """ + Setting the model into eval mode + + + Parameter + --------- + + Returns + --------- + """ + + self.model.eval() + for parameter in self.model.parameters(): + parameter.requires_grad = False
+ + + @property + def name(self): + return self.model.name + +
+[docs] + def sample_entity(self, n: int) -> List[str]: + assert isinstance(n, int) + assert n >= 0 + return random.sample([i for i in self.entity_to_idx.keys()], n)
+ + +
+[docs] + def sample_relation(self, n: int) -> List[str]: + assert isinstance(n, int) + assert n >= 0 + return random.sample([i for i in self.relation_to_idx.keys()], n)
+ + +
+    def is_seen(self, entity: str = None, relation: str = None) -> bool:
+        if entity is not None:
+            return entity in self.entity_to_idx
+        if relation is not None:
+            return relation in self.relation_to_idx
+        return False
+ + +
+[docs] + def save(self) -> None: + t = str(datetime.datetime.now()) + if self.construct_ensemble: + save_checkpoint_model(self.model, path=self.path + f'/model_ensemble_interactive_{str(t)}.pt') + else: + save_checkpoint_model(self.model, path=self.path + f'/model_interactive_{str(t)}.pt')
+ + +
+    def index_triple(self, head_entity: List[str], relation: List[str], tail_entity: List[str]) -> Tuple[
+        torch.LongTensor, torch.LongTensor, torch.LongTensor]:
+        """
+        Index a batch of triples, i.e., map string representations to integer indexes.
+
+        Parameter
+        ---------
+        head_entity: List[str]
+            String representations of selected entities.
+
+        relation: List[str]
+            String representations of selected relations.
+
+        tail_entity: List[str]
+            String representations of selected entities.
+
+        Returns
+        -------
+        A tuple of (n, 1)-shaped LongTensors holding the indexes of heads, relations, and tails.
+        """
+        n = len(head_entity)
+        assert n == len(relation) == len(tail_entity)
+        idx_head_entity = torch.LongTensor([self.entity_to_idx[i] for i in head_entity]).reshape(n, 1)
+        idx_relation = torch.LongTensor([self.relation_to_idx[i] for i in relation]).reshape(n, 1)
+        idx_tail_entity = torch.LongTensor([self.entity_to_idx[i] for i in tail_entity]).reshape(n, 1)
+        return idx_head_entity, idx_relation, idx_tail_entity
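Since index_triple only consults the two index dictionaries, its behaviour can be sketched with toy vocabularies, bypassing __init__ (which would otherwise load a pretrained model; the entity and relation names below are made up):

    import torch

    kge = BaseInteractiveKGE.__new__(BaseInteractiveKGE)   # skip __init__ for illustration
    kge.entity_to_idx = {"alice": 0, "bob": 1, "carol": 2}
    kge.relation_to_idx = {"knows": 0}
    h, r, t = kge.index_triple(["alice", "bob"], ["knows", "knows"], ["bob", "carol"])
    print(h.shape, r.shape, t.shape)   # torch.Size([2, 1]) for each tensor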
+ + +
+[docs] + def add_new_entity_embeddings(self, entity_name: str = None, embeddings: torch.FloatTensor = None): + assert isinstance(entity_name, str) and isinstance(embeddings, torch.FloatTensor) + + if entity_name in self.entity_to_idx: + print(f'Entity ({entity_name}) exists..') + else: + self.entity_to_idx[entity_name] = len(self.entity_to_idx) + self.idx_to_entity[self.entity_to_idx[entity_name]] = entity_name + self.num_entities += 1 + self.model.num_entities += 1 + self.model.entity_embeddings.weight.data = torch.cat( + (self.model.entity_embeddings.weight.data.detach(), embeddings.unsqueeze(0)), dim=0) + self.model.entity_embeddings.num_embeddings += 1
+ + +
+[docs] + def get_entity_embeddings(self, items: List[str]): + """ + Return embedding of an entity given its string representation + + + Parameter + --------- + items: + entities + + Returns + --------- + """ + return self.model.entity_embeddings(torch.LongTensor([self.entity_to_idx[i] for i in items]))
+ + +
+[docs] + def get_relation_embeddings(self, items: List[str]): + """ + Return embedding of a relation given its string representation + + + Parameter + --------- + items: + relations + + Returns + --------- + """ + return self.model.relation_embeddings(torch.LongTensor([self.relation_to_idx[i] for i in items]))
+ + +
+[docs] + def construct_input_and_output(self, head_entity: List[str], relation: List[str], tail_entity: List[str], labels): + """ + Construct a data point + :param head_entity: + :param relation: + :param tail_entity: + :param labels: + :return: + """ + idx_head_entity, idx_relation, idx_tail_entity = self.index_triple(head_entity, relation, tail_entity) + x = torch.hstack((idx_head_entity, idx_relation, idx_tail_entity)) + # Hard Labels + labels: object = torch.FloatTensor(labels) + return x, labels
+ + +
+[docs] + def parameters(self): + return self.model.parameters()
+
+ + + +
+class AbstractCallback(ABC, pytorch_lightning.callbacks.Callback):
+    """
+    Abstract base class for callbacks of knowledge graph embedding models.
+    """
+
+    def __init__(self):
+        pass
+
+[docs] + def on_init_start(self, *args, **kwargs): + """ + + Parameter + --------- + trainer: + + model: + + Returns + --------- + None + """ + pass
+ + +
+    def on_init_end(self, *args, **kwargs):
+        """
+        Call at the end of the initialization.
+
+        Parameter
+        ---------
+        trainer:
+
+        model:
+
+        Returns
+        ---------
+        None
+        """
+        pass
+ + +
+[docs] + def on_fit_start(self, trainer, model): + """ + Call at the beginning of the training. + + Parameter + --------- + trainer: + + model: + + Returns + --------- + None + """ + return
+ + +
+[docs] + def on_train_epoch_end(self, trainer, model): + """ + Call at the end of each epoch during training. + + Parameter + --------- + trainer: + + model: + + Returns + --------- + None + """ + pass
+ + +
+[docs] + def on_train_batch_end(self, *args, **kwargs): + """ + Call at the end of each mini-batch during the training. + + + Parameter + --------- + trainer: + + model: + + Returns + --------- + None + """ + pass
+ + +
+[docs] + def on_fit_end(self, *args, **kwargs): + """ + Call at the end of the training. + + Parameter + --------- + trainer: + + model: + + Returns + --------- + None + """ + pass
+
+ + + +
+class AbstractPPECallback(AbstractCallback):
+    """
+    Abstract base class for Polyak Parameter Ensemble (PPE) callbacks that maintain
+    a running weighted average of the model parameters over training epochs.
+
+    Parameter
+    ---------
+    num_epochs: int
+        Total number of training epochs.
+
+    path: str
+        Directory in which the running parameter ensemble is stored.
+
+    last_percent_to_consider: int
+        If given, only the last X % of the epochs contribute to the ensemble.
+    """
+
+    def __init__(self, num_epochs, path, last_percent_to_consider):
+        super(AbstractPPECallback, self).__init__()
+        self.num_epochs = num_epochs
+        self.path = path
+        self.sample_counter = 0
+        if last_percent_to_consider is None:
+            self.epoch_to_start = 1
+            self.num_ensemble_coefficient = self.num_epochs - 1
+        else:
+            # Compute the last X % of the training
+            self.epoch_to_start = self.num_epochs - int(self.num_epochs * last_percent_to_consider / 100)
+            self.num_ensemble_coefficient = self.num_epochs - self.epoch_to_start
+
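A quick check of this arithmetic with illustrative values:

    num_epochs, last_percent_to_consider = 100, 10
    epoch_to_start = num_epochs - int(num_epochs * last_percent_to_consider / 100)   # 90
    num_ensemble_coefficient = num_epochs - epoch_to_start                           # 10
    # i.e., only the parameters of the last 10 epochs enter the ensemble; with
    # last_percent_to_consider=None, all epochs but the first contribute (99 coefficients).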
+[docs] + def on_fit_start(self, trainer, model): + pass
+ + +
+[docs] + def on_fit_end(self, trainer, model): + model.load_state_dict(torch.load(f"{self.path}/trainer_checkpoint_main.pt", torch.device('cpu')))
+ + +
+[docs] + def on_train_epoch_end(self, trainer, model): + if self.epoch_to_start <= 0: + if self.sample_counter == 0: + torch.save(model.state_dict(), f=f"{self.path}/trainer_checkpoint_main.pt") + # (1) Load the running parameter ensemble model. + param_ensemble = torch.load(f"{self.path}/trainer_checkpoint_main.pt", torch.device(model.device)) + with torch.no_grad(): + for k, v in model.state_dict().items(): + if v.dtype==torch.float: + # (2) Update the parameter ensemble model with the current model. + param_ensemble[k] += self.alphas[self.sample_counter] * v + # (3) Save the updated parameter ensemble model. + torch.save(param_ensemble, f=f"{self.path}/trainer_checkpoint_main.pt") + self.sample_counter += 1 + + self.epoch_to_start -= 1
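Conceptually, steps (1)-(3) maintain a running weighted sum over parameter snapshots: the first contributing epoch seeds the checkpoint with the raw state dict, and every contributing epoch then adds alphas[i] * theta_i to it. A toy sketch of the accumulation (plain tensors, not dicee code; the seeding step is omitted for clarity):

    import torch

    alphas = torch.ones(3) / 3                        # uniform coefficients, as in PPE below
    snapshots = [torch.tensor([0.0]), torch.tensor([3.0]), torch.tensor([6.0])]
    ensemble = torch.zeros(1)
    for alpha, theta in zip(alphas, snapshots):       # mirrors: param_ensemble[k] += alphas[i] * v
        ensemble += alpha * theta
    print(ensemble)                                   # tensor([3.]) -- the mean of the snapshots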
+ + +
+[docs] + def on_train_batch_end(self, *args, **kwargs): + return
+
\ No newline at end of file
diff --git a/_modules/dicee/callbacks.html b/_modules/dicee/callbacks.html
new file mode 100644
index 00000000..72e46539
--- /dev/null
+++ b/_modules/dicee/callbacks.html
@@ -0,0 +1,706 @@
Source code for dicee.callbacks

+import datetime
+import time
+import numpy as np
+import torch
+
+import dicee.models.base_model
+from .static_funcs import save_checkpoint_model, exponential_function, save_pickle
+from .abstracts import AbstractCallback, AbstractPPECallback
+import pandas as pd
+
+
+
+[docs] +class AccumulateEpochLossCallback(AbstractCallback): + def __init__(self, path: str): + super().__init__() + self.path = path + +
+[docs] + def on_fit_end(self, trainer, model) -> None: + """ + Store epoch loss + + + Parameter + --------- + trainer: + + model: + + Returns + --------- + None + """ + pd.DataFrame(model.loss_history, columns=['EpochLoss']).to_csv(f'{self.path}/epoch_losses.csv')
+
+ + + +
+[docs] +class PrintCallback(AbstractCallback): + def __init__(self): + super().__init__() + self.start_time = time.time() + +
+[docs] + def on_fit_start(self, trainer, pl_module): + print(pl_module) + print(pl_module.summarize()) + print(pl_module.selected_optimizer) + print(f"\nTraining is starting {datetime.datetime.now()}...")
+ + +
+[docs] + def on_fit_end(self, trainer, pl_module): + training_time = time.time() - self.start_time + if 60 > training_time: + message = f'{training_time:.3f} seconds.' + elif 60 * 60 > training_time > 60: + message = f'{training_time / 60:.3f} minutes.' + elif training_time > 60 * 60: + message = f'{training_time / (60 * 60):.3f} hours.' + else: + message = f'{training_time:.3f} seconds.' + print(f"Training Runtime: {message}\n")
+ + +
+[docs] + def on_train_batch_end(self, *args, **kwargs): + return
+ + +
+[docs] + def on_train_epoch_end(self, *args, **kwargs): + return
+
+ + + +
+[docs] +class KGESaveCallback(AbstractCallback): + def __init__(self, every_x_epoch: int, max_epochs: int, path: str): + super().__init__() + self.every_x_epoch = every_x_epoch + self.max_epochs = max_epochs + self.epoch_counter = 0 + self.path = path + if self.every_x_epoch is None: + self.every_x_epoch = max(self.max_epochs // 2, 1) + +
+[docs] + def on_train_batch_end(self, *args, **kwargs): + return
+ + +
+[docs] + def on_fit_start(self, trainer, pl_module): + pass
+ + +
+[docs] + def on_train_epoch_end(self, *args, **kwargs): + pass
+ + +
+[docs] + def on_fit_end(self, *args, **kwargs): + pass
+ + +
+    def on_epoch_end(self, model, trainer, **kwargs):
+        if self.epoch_counter % self.every_x_epoch == 0 and self.epoch_counter > 1:
+            print(f'\nStoring model {self.epoch_counter}...')
+            save_checkpoint_model(model,
+                                  path=self.path + f'/model_at_{self.epoch_counter}_'
+                                                   f'epoch_{datetime.datetime.now()}.pt')
+        self.epoch_counter += 1
+
+ + + +
+[docs] +class PseudoLabellingCallback(AbstractCallback): + def __init__(self, data_module, kg, batch_size): + super().__init__() + self.data_module = data_module + self.kg = kg + self.num_of_epochs = 0 + self.unlabelled_size = len(self.kg.unlabelled_set) + self.batch_size = batch_size + +
+[docs] + def create_random_data(self): + entities = torch.randint(low=0, high=self.kg.num_entities, size=(self.batch_size, 2)) + relations = torch.randint(low=0, high=self.kg.num_relations, size=(self.batch_size,)) + # unlabelled triples + return torch.stack((entities[:, 0], relations, entities[:, 1]), dim=1)
+ + +
+    def on_epoch_end(self, trainer, model):
+        # Grow the training set with pseudo-labelled triples.
+        model.eval()
+        with torch.no_grad():
+            # (1) Sample a batch of unlabelled triples
+            #     (random triples could be created via create_random_data() instead).
+            unlabelled_input_batch = self.kg.unlabelled_set[
+                torch.randint(low=0, high=self.unlabelled_size, size=(self.batch_size,))]
+            # (2) Predict the unlabelled batch and use the predictions as pseudo-labels.
+            pseudo_label = torch.sigmoid(model(unlabelled_input_batch))
+            selected_triples = unlabelled_input_batch[pseudo_label >= .90]
+            if len(selected_triples) > 0:
+                # (3) Add the confidently predicted triples to the training set.
+                self.data_module.train_set_idx = np.concatenate(
+                    (self.data_module.train_set_idx, selected_triples.detach().numpy()),
+                    axis=0)
+                trainer.train_dataloader = self.data_module.train_dataloader()
+                print(f'\tEpoch:{trainer.current_epoch}: Pseudo-labelling\t |D|= {len(self.data_module.train_set_idx)}')
+        model.train()
+
+ + + +
+def estimate_q(eps):
+    """ Estimate the rate of convergence q from the sequence eps."""
+    x = np.arange(len(eps) - 1)
+    y = np.log(np.abs(np.diff(np.log(eps))))
+    line = np.polyfit(x, y, 1)  # fit a degree-1 polynomial
+    q = np.exp(line[0])  # the slope yields q
+    return q
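For a sequence with eps_{k+1} ≈ eps_k^q, the slope of log|diff(log eps)| over k is log q, so estimate_q recovers the order of convergence. A small self-check with a quadratically converging toy sequence:

    import numpy as np

    eps = np.array([1e-1, 1e-2, 1e-4, 1e-8, 1e-16])   # eps_{k+1} = eps_k ** 2
    print(estimate_q(eps))                             # ~2.0, i.e., quadratic convergence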
+ + + +
+[docs] +def compute_convergence(seq, i): + assert len(seq) >= i > 0 + return estimate_q(seq[-i:] / (np.arange(i) + 1))
+ + + +
+[docs] +class PPE(AbstractPPECallback): + """ A callback for Polyak Parameter Ensemble Technique + Maintains a running parameter average for all parameters requiring gradient signals + """ + + def __init__(self, num_epochs, path, last_percent_to_consider=None): + super().__init__(num_epochs, path, last_percent_to_consider) + self.alphas = np.ones(self.num_ensemble_coefficient) / self.num_ensemble_coefficient
+ + +
+[docs] +class FPPE(AbstractPPECallback): + """ + import matplotlib.pyplot as plt + import numpy as np + def exponential_function(x: np.ndarray, lam: float, ascending_order=True) -> torch.FloatTensor: + # A sequence in exponentially decreasing order + result = np.exp(-lam * x) / np.sum(np.exp(-lam * x)) + assert 0.999 < sum(result) < 1.0001 + result = np.flip(result) if ascending_order else result + return torch.tensor(result.tolist()) + + N = 100 + equal_weights = np.ones(N) / N + plt.plot(equal_weights, 'r', label="Equal") + plt.plot(exponential_function(np.arange(N), lam=0.1,), 'c-', label="Exp. forgetful with 0.1") + plt.plot(exponential_function(np.arange(N), lam=0.05), 'g-', label="Exp. forgetful with 0.05") + plt.plot(exponential_function(np.arange(N), lam=0.025), 'b-', label="Exp. forgetful with 0.025") + plt.plot(exponential_function(np.arange(N), lam=0.01), 'k-', label="Exp. forgetful with 0.01") + plt.title('Ensemble coefficients') + plt.xlabel('Epochs') + plt.ylabel('Coefficients') + plt.legend() + plt.savefig('ensemble_coefficients.pdf') + plt.show() + """ + + def __init__(self, num_epochs, path, last_percent_to_consider=None): + super().__init__(num_epochs, path, last_percent_to_consider) + lamb = 0.1 + self.alphas = exponential_function(np.arange(self.num_ensemble_coefficient), lam=lamb, ascending_order=True) + print(f"Forgetful Ensemble Coefficients with lambda {lamb}:", self.alphas)
+ + + +
+[docs] +class Eval(AbstractCallback): + def __init__(self, path, epoch_ratio: int = None): + super().__init__() + self.path = path + self.reports = [] + self.epoch_ratio = epoch_ratio if epoch_ratio is not None else 1 + self.epoch_counter = 0 + +
+[docs] + def on_fit_start(self, trainer, model): + pass
+ + +
+[docs] + def on_fit_end(self, trainer, model): + save_pickle(data=self.reports, file_path=trainer.attributes.full_storage_path + '/evals_per_epoch') + """ + + fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(7, 7)) + for (p,q), mrr in pairs_to_train_mrr.items(): + ax1.plot(mrr, label=f'{p},{q}') + ax1.set_ylabel('Train MRR') + + for (p,q), mrr in pairs_to_val_mrr.items(): + ax2.plot(mrr, label=f'{p},{q}') + ax2.set_ylabel('Val MRR') + + plt.legend() + plt.xlabel('Epochs') + plt.savefig('{full_storage_path}train_val_mrr.pdf') + plt.show() + """
+ + +
+[docs] + def on_train_epoch_end(self, trainer, model): + self.epoch_counter += 1 + if self.epoch_counter % self.epoch_ratio == 0: + model.eval() + report = trainer.evaluator.eval(dataset=trainer.dataset, trained_model=model, + form_of_labelling=trainer.form_of_labelling, during_training=True) + model.train() + self.reports.append(report)
+ + +
+[docs] + def on_train_batch_end(self, *args, **kwargs): + return
+
+ + + +
+[docs] +class KronE(AbstractCallback): + def __init__(self): + super().__init__() + self.f = None + +
+    @staticmethod
+    def batch_kronecker_product(a, b):
+        """
+        Kronecker product of matrices a and b with leading batch dimensions.
+        Batch dimensions are broadcast; the number of batch dimensions must match.
+        :type a: torch.Tensor
+        :type b: torch.Tensor
+        :rtype: torch.Tensor
+        """
+        a, b = a.unsqueeze(1), b.unsqueeze(1)
+        siz1 = torch.Size(torch.tensor(a.shape[-2:]) * torch.tensor(b.shape[-2:]))
+        res = a.unsqueeze(-1).unsqueeze(-3) * b.unsqueeze(-2).unsqueeze(-4)
+        siz0 = res.shape[:-4]
+        res = res.reshape(siz0 + siz1)
+        return res.flatten(1)
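For intuition about the shape arithmetic above: two (1, 2) inputs yield a (1, 4) row of all pairwise products. A small self-contained check:

    import torch

    a = torch.tensor([[1., 2.]])
    b = torch.tensor([[10., 20.]])
    print(KronE.batch_kronecker_product(a, b))   # tensor([[10., 20., 20., 40.]])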
+ + +
+[docs] + def get_kronecker_triple_representation(self, indexed_triple: torch.LongTensor): + """ + Get kronecker embeddings + """ + n, d = indexed_triple.shape + assert d == 3 + # Get the embeddings + head_ent_emb, rel_ent_emb, tail_ent_emb = self.f(indexed_triple) + + head_ent_kron_emb = self.batch_kronecker_product(*torch.hsplit(head_ent_emb, 2)) + rel_ent_kron_emb = self.batch_kronecker_product(*torch.hsplit(rel_ent_emb, 2)) + tail_ent_kron_emb = self.batch_kronecker_product(*torch.hsplit(tail_ent_emb, 2)) + + return torch.cat((head_ent_emb, head_ent_kron_emb), dim=1), \ + torch.cat((rel_ent_emb, rel_ent_kron_emb), dim=1), \ + torch.cat((tail_ent_emb, tail_ent_kron_emb), dim=1)
+ + +
+[docs] + def on_fit_start(self, trainer, model): + if isinstance(model.normalize_head_entity_embeddings, dicee.models.base_model.IdentityClass): + self.f = model.get_triple_representation + model.get_triple_representation = self.get_kronecker_triple_representation + + else: + raise NotImplementedError('Normalizer should be reinitialized')
+
+ + + +
+class Perturb(AbstractCallback):
+    """ A callback for three-level perturbation.
+
+    Input Perturbation: During training, an input x is perturbed by randomly replacing one of its components.
+    In the context of knowledge graph embedding models, x can denote a triple, a tuple of an entity and a relation,
+    or a tuple of two entities. A perturbation replaces such a component with a random entity or relation.
+
+    Parameter Perturbation: The embeddings of randomly selected components are perturbed with Gaussian ("GN")
+    or uniform random ("RN") noise scaled by `scaler`.
+
+    Output Perturbation: The labels of randomly selected data points are softened ("RN") or hard-flipped ("Hard").
+    """
+
+    def __init__(self, level: str = "input", ratio: float = 0.0, method: str = None, scaler: float = None,
+                 frequency=None):
+        """
+        level: one of {"input", "param", "out"}
+        ratio: float in [0, 1], the percentage of mini-batch data points to be perturbed.
+        method: perturbation method, e.g., "GN", "RN", or "Hard", depending on the level.
+        scaler: magnitude of the noise.
+        frequency: how often to perturb (per epoch or per mini-batch); currently unused.
+        """
+        super().__init__()
+
+        assert level in {"input", "param", "out"}
+        assert ratio >= 0.0
+        self.level = level
+        self.ratio = ratio
+        self.method = method
+        self.scaler = scaler
+        self.frequency = frequency  # per epoch, per mini-batch ?
+
+    def on_train_batch_start(self, trainer, model, batch, batch_idx):
+        # Modifications should be in-place.
+        x, y = batch
+        n, _ = x.shape
+        num_of_perturbed_data = int(n * self.ratio)
+        if num_of_perturbed_data == 0:
+            return None
+        assert n > 0
+        device = x.get_device()
+        if device == -1:
+            device = "cpu"
+        # Sample random integers from 0 to n without replacement and take k of them.
+        random_indices = torch.randperm(n, device=device)[:num_of_perturbed_data]
+        if self.level == "input":
+            if torch.rand(1) > 0.5:
+                # Perturb input via heads.
+                perturbation = torch.randint(low=0, high=model.num_entities, size=(num_of_perturbed_data,),
+                                             device=device)
+                x[random_indices] = torch.column_stack((perturbation, x[:, 1][random_indices]))
+            else:
+                # Perturb input via relations.
+                perturbation = torch.randint(low=0, high=model.num_relations, size=(num_of_perturbed_data,),
+                                             device=device)
+                x[random_indices] = torch.column_stack((x[:, 0][random_indices], perturbation))
+        elif self.level == "param":
+            h, r = torch.hsplit(x, 2)
+            if self.method == "GN":
+                if torch.rand(1) > 0.0:
+                    h_selected = h[random_indices]
+                    with torch.no_grad():
+                        model.entity_embeddings.weight[h_selected] += torch.normal(
+                            mean=0, std=self.scaler,
+                            size=model.entity_embeddings.weight[h_selected].shape,
+                            device=model.device)
+                else:
+                    r_selected = r[random_indices]
+                    with torch.no_grad():
+                        model.relation_embeddings.weight[r_selected] += torch.normal(
+                            mean=0, std=self.scaler,
+                            size=model.relation_embeddings.weight[r_selected].shape,
+                            device=model.device)
+            elif self.method == "RN":
+                if torch.rand(1) > 0.0:
+                    h_selected = h[random_indices]
+                    with torch.no_grad():
+                        model.entity_embeddings.weight[h_selected] += torch.rand(
+                            size=model.entity_embeddings.weight[h_selected].shape,
+                            device=model.device) * self.scaler
+                else:
+                    r_selected = r[random_indices]
+                    with torch.no_grad():
+                        model.relation_embeddings.weight[r_selected] += torch.rand(
+                            size=model.relation_embeddings.weight[r_selected].shape,
+                            device=model.device) * self.scaler
+            else:
+                raise RuntimeError(f"--method is given as {self.method}!")
+        elif self.level == "out":
+            if self.method == "RN":
+                # Soft perturbation: move the selected labels towards the opposite class by a random amount.
+                perturb = torch.rand(1, device=model.device) * self.scaler
+                # https://pytorch.org/docs/stable/generated/torch.where.html
+                # 1.0 => 1.0 - perturb
+                # 0.0 => perturb
+                batch[1][random_indices] = torch.where(batch[1][random_indices] == 1.0, 1.0 - perturb, perturb)
+            elif self.method == "Hard":
+                # Hard perturbation: flip all selected labels.
+                batch[1][random_indices] = torch.where(batch[1][random_indices] == 1.0, 0.0, 1.0)
+            else:
+                raise NotImplementedError(f"{self.level}")
+        else:
+            raise RuntimeError(f"--level is given as {self.level}!")
+
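A hedged usage sketch (the argument values are illustrative): the callback is configured once per level and passed to the trainer's callback list.

    input_noise = Perturb(level="input", ratio=0.1)                            # replace 10% of heads/relations
    param_noise = Perturb(level="param", ratio=0.1, method="GN", scaler=0.01)  # Gaussian noise on embeddings
    label_noise = Perturb(level="out", ratio=0.1, method="RN", scaler=0.1)     # soften 10% of the labels
    # e.g., callbacks=[input_noise] when constructing the trainer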
\ No newline at end of file
diff --git a/_modules/dicee/config.html b/_modules/dicee/config.html
new file mode 100644
index 00000000..f0bc4efe
--- /dev/null
+++ b/_modules/dicee/config.html
@@ -0,0 +1,356 @@
Source code for dicee.config

+import argparse
+
+class Namespace(argparse.Namespace):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.dataset_dir: str = 'KGs/UMLS'
+        "The path of a folder containing train.txt, and/or valid.txt and/or test.txt"
+
+        self.save_embeddings_as_csv: bool = False
+        "A flag for saving entity and relation embeddings in a CSV file to facilitate easy usage."
+
+        self.storage_path: str = 'Experiments'
+        "A directory named with the time of execution under --storage_path that contains related data about embeddings."
+
+        self.path_to_store_single_run: str = None
+        "A single directory created that contains related data about embeddings."
+
+        self.path_single_kg = None
+        "Path of a file corresponding to the input knowledge graph"
+
+        self.sparql_endpoint = None
+        "An endpoint of a triple store."
+
+        self.model: str = "Keci"
+        "KGE model"
+
+        self.optim: str = 'Adam'
+        "Optimizer"
+
+        self.embedding_dim: int = 64
+        "Size of the continuous vector representation of an entity/relation"
+
+        self.num_epochs: int = 150
+        "Number of passes over the training data"
+
+        self.batch_size: int = 1024
+        "Mini-batch size. If None, an automatic batch-finding technique is applied."
+
+        self.lr: float = 0.1
+        """Learning rate"""
+
+        self.add_noise_rate: float = None
+        "The ratio of random triples added into the training dataset"
+
+        self.p: int = 0
+        "P parameter of Clifford Embeddings"
+
+        self.q: int = 1
+        "Q parameter of Clifford Embeddings"
+
+        self.gpus = None
+        """Number of GPUs to be used during training"""
+
+        self.callbacks = dict()
+        """Callbacks, e.g., {"PPE":{ "last_percent_to_consider": 10}}"""
+
+        self.backend: str = "pandas"
+        """Backend to read, process, and index the input knowledge graph. pandas, polars and rdflib are available"""
+
+        self.trainer: str = 'torchCPUTrainer'
+        """Trainer for the knowledge graph embedding model"""
+
+        self.scoring_technique: str = 'KvsAll'
+        """Scoring technique for knowledge graph embedding models"""
+
+        self.neg_ratio: int = 0
+        """Negative ratio for a true triple in the NegSample training technique"""
+
+        self.weight_decay: float = 0.0
+        """Weight decay for all trainable parameters"""
+
+        self.input_dropout_rate: float = 0.0
+        """Dropout rate on embeddings of input triples"""
+
+        self.hidden_dropout_rate: float = 0.0
+        """Dropout rate on hidden representations of input triples"""
+
+        self.feature_map_dropout_rate: float = 0.0
+        """Dropout rate on a feature map generated by a convolution operation"""
+
+        self.normalization: str = "None"
+        """LayerNorm, BatchNorm1d, or None"""
+
+        self.init_param: str = None
+        """xavier_normal or None"""
+
+        self.gradient_accumulation_steps: int = 0
+        """Number of gradient accumulation steps. Not tested."""
+
+        self.num_folds_for_cv: int = 0
+        """Number of folds for cross-validation"""
+
+        self.eval_model: str = "train_val_test"
+        """Evaluate the trained model. Choices: ["None", "train", "train_val", "train_val_test", "test"]"""
+
+        self.save_model_at_every_epoch: int = None
+        """Save the model at every given epoch. Not tested."""
+
+        self.label_smoothing_rate: float = 0.0
+
+        self.kernel_size: int = 3
+        """Size of a square kernel in a convolution operation"""
+
+        self.num_of_output_channels: int = 32
+        """Number of slices in the feature map generated by convolution"""
+
+        self.num_core: int = 0
+        """Number of CPUs to be used in the mini-batch loading process"""
+
+        self.random_seed: int = 0
+        "Random Seed"
+
+        self.sample_triples_ratio: float = None
+        """Sample triples uniformly at random; the ratio is between 0 and 1"""
+
+        self.read_only_few: int = None
+        """Read only the first few triples"""
+
+        self.pykeen_model_kwargs = dict()
+        """Additional keyword arguments for pykeen models"""
+
+    def __iter__(self):
+        # Iterate over the attribute/value pairs.
+        for k, v in self.__dict__.items():
+            yield k, v
+ +
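Since Namespace subclasses argparse.Namespace and pre-populates every default, a typical use is to instantiate it and override a few attributes before handing it to the executor; __iter__ makes the resulting configuration easy to inspect:

    from dicee.config import Namespace

    args = Namespace()
    args.model = "Keci"        # override selected defaults attribute-wise
    args.num_epochs = 20
    args.embedding_dim = 32
    for k, v in args:          # __iter__ yields (attribute, value) pairs
        print(k, v)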
\ No newline at end of file
diff --git a/_modules/dicee/dataset_classes.html b/_modules/dicee/dataset_classes.html
new file mode 100644
index 00000000..4fd040c8
--- /dev/null
+++ b/_modules/dicee/dataset_classes.html
@@ -0,0 +1,890 @@
Source code for dicee.dataset_classes

+from torch.utils.data import DataLoader
+import numpy as np
+import torch
+import pytorch_lightning as pl
+from typing import Dict, List
+from .static_preprocess_funcs import mapping_from_first_two_cols_to_third
+from .static_funcs import timeit, load_pickle
+
+
+
+[docs] +def input_data_type_checking(train_set_idx, valid_set_idx, test_set_idx, entity_to_idx: Dict, relation_to_idx: Dict): + """ Type checking for efficient memory usage""" + assert isinstance(train_set_idx, np.ndarray) + assert str(np.dtype(train_set_idx.dtype)) in ['int8', 'int16', 'int32'] + if valid_set_idx is not None: + if len(valid_set_idx) > 0: + assert isinstance(valid_set_idx, np.ndarray) + assert str(np.dtype(valid_set_idx.dtype)) in ['int8', 'int16', 'int32'] + if test_set_idx is not None: + if len(test_set_idx) > 0: + assert isinstance(test_set_idx, np.ndarray) + assert str(np.dtype(test_set_idx.dtype)) in ['int8', 'int16', 'int32'] + assert isinstance(entity_to_idx, dict) + assert isinstance(relation_to_idx, dict)
+ + + +
+[docs] +def create_tensor(x: np.ndarray): + str_type = str(np.dtype(x.dtype)) + if str_type == 'int8': + return torch.CharTensor(x) + elif str_type == 'int16': + return torch.ShortTensor(x) + elif str_type == 'int32': + return torch.IntTensor(x) + else: + raise TypeError(f'x has a type of {str_type}.')
+ + + +
+[docs] +@timeit +def reload_dataset(path: str, form_of_labelling, scoring_technique, neg_ratio, label_smoothing_rate): + """ Reload the files from disk to construct the Pytorch dataset """ + return construct_dataset(train_set=np.load(path + '/train_set.npy'), + valid_set=None, + test_set=None, + entity_to_idx=load_pickle(file_path=path + '/entity_to_idx.p'), + relation_to_idx=load_pickle(file_path=path + '/relation_to_idx.p'), + form_of_labelling=form_of_labelling, + scoring_technique=scoring_technique, neg_ratio=neg_ratio, + label_smoothing_rate=label_smoothing_rate)
+ + + +
+[docs] +@timeit +def construct_dataset(*, train_set: np.ndarray, + valid_set=None, + test_set=None, + entity_to_idx: dict, + relation_to_idx: dict, + form_of_labelling: str, + scoring_technique: str, + neg_ratio: int, + label_smoothing_rate: float) -> torch.utils.data.Dataset: + if scoring_technique == 'NegSample': + # Binary-class. + train_set = TriplePredictionDataset(train_set=train_set, + num_entities=len(entity_to_idx), + num_relations=len(relation_to_idx), + neg_sample_ratio=neg_ratio, + label_smoothing_rate=label_smoothing_rate) + elif form_of_labelling == 'EntityPrediction': + if scoring_technique == '1vsAll': + # Multi-class. + train_set = OnevsAllDataset(train_set, entity_idxs=entity_to_idx) + elif scoring_technique == 'KvsSample': + # Multi-label. + train_set = KvsSampleDataset(train_set=train_set, + num_entities=len(entity_to_idx), + num_relations=len(relation_to_idx), + neg_sample_ratio=neg_ratio, + label_smoothing_rate=label_smoothing_rate) + elif scoring_technique == 'KvsAll': + # Multi-label. + train_set = KvsAll(train_set, + entity_idxs=entity_to_idx, + relation_idxs=relation_to_idx, form=form_of_labelling, + label_smoothing_rate=label_smoothing_rate) + elif scoring_technique == 'AllvsAll': + # Multi-label imbalanced. + train_set = AllvsAll(train_set, + entity_idxs=entity_to_idx, + relation_idxs=relation_to_idx, + label_smoothing_rate=label_smoothing_rate) + else: + raise ValueError(f'Invalid scoring technique : {scoring_technique}') + elif form_of_labelling == 'RelationPrediction': + # Multi-label. + train_set = KvsAll(train_set, entity_idxs=entity_to_idx, relation_idxs=relation_to_idx, + form=form_of_labelling, label_smoothing_rate=label_smoothing_rate) + else: + raise KeyError('Illegal input.') + return train_set
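construct_dataset is a keyword-only factory whose scoring_technique argument selects the dataset class. A minimal sketch with toy index mappings (assuming the contiguous-id convention used throughout this module):

    import numpy as np

    train = np.array([[0, 0, 1], [1, 0, 2]], dtype=np.int32)   # two indexed triples
    ds = construct_dataset(train_set=train, valid_set=None, test_set=None,
                           entity_to_idx={"a": 0, "b": 1, "c": 2},
                           relation_to_idx={"r": 0},
                           form_of_labelling="EntityPrediction",
                           scoring_technique="KvsAll",
                           neg_ratio=0, label_smoothing_rate=0.0)
    x, y = ds[0]   # an (h, r) index pair and a |E|-dimensional multi-label target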
+ + + +
+class OnevsAllDataset(torch.utils.data.Dataset):
+    """
+    Dataset for the 1vsAll training strategy.
+
+    Parameters
+    ----------
+    train_set_idx
+        Indexed triples for the training.
+    entity_idxs
+        Mapping from entity strings to integer indexes; its size determines the target dimension.
+
+    Returns
+    -------
+    torch.utils.data.Dataset
+    """
+
+    def __init__(self, train_set_idx: np.ndarray, entity_idxs):
+        super().__init__()
+        assert isinstance(train_set_idx, np.ndarray)
+        assert len(train_set_idx) > 0
+        self.train_data = torch.LongTensor(train_set_idx)
+        self.target_dim = len(entity_idxs)
+        self.collate_fn = None
+
+    def __len__(self):
+        return len(self.train_data)
+
+    def __getitem__(self, idx):
+        y_vec = torch.zeros(self.target_dim)
+        y_vec[self.train_data[idx, 2]] = 1
+        return self.train_data[idx, :2], y_vec
+ + + +
+class KvsAll(torch.utils.data.Dataset):
+    """ Creates a dataset for KvsAll training by inheriting from torch.utils.data.Dataset.
+    Let D denote a dataset for KvsAll training and be defined as D:= {(x,y)_i}_i ^N, where
+    x: (h,r) is a unique tuple of an entity h \in E and a relation r \in R that has been seen in the input graph.
+    y: denotes a multi-label vector \in [0,1]^{|E|}, a binary label. \forall y_i = 1 s.t. (h r E_i) \in KG
+
+    .. note::
+        TODO
+
+    Parameters
+    ----------
+    train_set_idx : numpy.ndarray
+        n by 3 array representing n triples
+
+    entity_idxs : dictionary
+        string representation of an entity to its integer id
+
+    relation_idxs : dictionary
+        string representation of a relation to its integer id
+
+    Returns
+    -------
+    self : torch.utils.data.Dataset
+    """
+
+    def __init__(self, train_set_idx: np.ndarray, entity_idxs, relation_idxs, form, store=None,
+                 label_smoothing_rate: float = 0.0):
+        super().__init__()
+        assert len(train_set_idx) > 0
+        assert isinstance(train_set_idx, np.ndarray)
+        self.train_data = None
+        self.train_target = None
+        self.label_smoothing_rate = torch.tensor(label_smoothing_rate)
+        self.collate_fn = None
+
+        # (1) Create a dictionary of training data points,
+        # either from a tuple of entities or a tuple of an entity and a relation.
+        if store is None:
+            store = dict()
+            if form == 'RelationPrediction':
+                self.target_dim = len(relation_idxs)
+                for s_idx, p_idx, o_idx in train_set_idx:
+                    store.setdefault((s_idx, o_idx), list()).append(p_idx)
+            elif form == 'EntityPrediction':
+                self.target_dim = len(entity_idxs)
+                store = mapping_from_first_two_cols_to_third(train_set_idx)
+            else:
+                raise NotImplementedError
+        else:
+            raise ValueError()
+        assert len(store) > 0
+        # Keys in store correspond to the integer representations (indexes) of subject and predicate.
+        # Values correspond to lists of integer representations of entities.
+        self.train_data = torch.LongTensor(list(store.keys()))
+
+        if sum([len(i) for i in store.values()]) == len(store):
+            # If each (s, p) pair is associated with at most one entity.
+            self.train_target = np.array(list(store.values()))
+            try:
+                assert isinstance(self.train_target[0], np.ndarray)
+            except (IndexError, AssertionError):
+                print(self.train_target)
+                exit(1)
+        else:
+            self.train_target = list(store.values())
+            assert isinstance(self.train_target[0], list)
+        del store
+
+    def __len__(self):
+        assert len(self.train_data) == len(self.train_target)
+        return len(self.train_data)
+
+    def __getitem__(self, idx):
+        # 1. Initialize the output vector.
+        y_vec = torch.zeros(self.target_dim)
+        y_vec[self.train_target[idx]] = 1.0
+
+        if self.label_smoothing_rate:
+            y_vec = y_vec * (1 - self.label_smoothing_rate) + (1 / y_vec.size(0))
+        return self.train_data[idx], y_vec
+ + + +
+class AllvsAll(torch.utils.data.Dataset):
+    """ Creates a dataset for AllvsAll training by inheriting from torch.utils.data.Dataset.
+    Let D denote a dataset for AllvsAll training and be defined as D:= {(x,y)_i}_i ^N, where
+    x: (h,r) is any possible unique tuple of an entity h \in E and a relation r \in R. Hence N = |E| x |R|.
+    y: denotes a multi-label vector \in [0,1]^{|E|}, a binary label. \forall y_i = 1 s.t. (h r E_i) \in KG
+
+    .. note::
+        AllvsAll extends KvsAll via non-existing (h, r) pairs. Hence, it adds data points that are labelled
+        only with 0s, without any 1.
+
+    Parameters
+    ----------
+    train_set_idx : numpy.ndarray
+        n by 3 array representing n triples
+
+    entity_idxs : dictionary
+        string representation of an entity to its integer id
+
+    relation_idxs : dictionary
+        string representation of a relation to its integer id
+
+    Returns
+    -------
+    self : torch.utils.data.Dataset
+    """
+
+    def __init__(self, train_set_idx: np.ndarray, entity_idxs, relation_idxs,
+                 label_smoothing_rate=0.0):
+        super().__init__()
+        assert len(train_set_idx) > 0
+        assert isinstance(train_set_idx, np.ndarray)
+        self.train_data = None
+        self.train_target = None
+        self.label_smoothing_rate = torch.tensor(label_smoothing_rate)
+        self.collate_fn = None
+        # (1) Create a dictionary of training data points from all (entity, relation) tuples.
+        self.target_dim = len(entity_idxs)
+        # (h,r) => [t]
+        store = mapping_from_first_two_cols_to_third(train_set_idx)
+        print("Number of unique pairs:", len(store))
+        for i in range(len(entity_idxs)):
+            for j in range(len(relation_idxs)):
+                if store.get((i, j), None) is None:
+                    store[(i, j)] = list()
+        print("Number of unique augmented pairs:", len(store))
+        assert len(store) > 0
+        self.train_data = torch.LongTensor(list(store.keys()))
+
+        if sum([len(i) for i in store.values()]) == len(store):
+            self.train_target = np.array(list(store.values()))
+            assert isinstance(self.train_target[0], np.ndarray)
+        else:
+            self.train_target = list(store.values())
+            assert isinstance(self.train_target[0], list)
+        del store
+
+    def __len__(self):
+        assert len(self.train_data) == len(self.train_target)
+        return len(self.train_data)
+
+    def __getitem__(self, idx):
+        # 1. Initialize the output vector.
+        y_vec = torch.zeros(self.target_dim)
+        existing_indices = self.train_target[idx]
+        if len(existing_indices) > 0:
+            y_vec[self.train_target[idx]] = 1.0
+
+        if self.label_smoothing_rate:
+            y_vec = y_vec * (1 - self.label_smoothing_rate) + (1 / y_vec.size(0))
+        return self.train_data[idx], y_vec
+ + + +
+class KvsSampleDataset(torch.utils.data.Dataset):
+    """
+    KvsSample dataset:
+    D:= {(x,y)_i}_i ^N, where
+    . x: (h,r) with a unique h \in E and a relation r \in R, and
+    . y \in [0,1]^{|E|} is a binary label. \forall y_i = 1 s.t. (h r E_i) \in KG
+    At each mini-batch construction, y is subsampled so that |new_y| << |E|:
+    if the number of positive tails is below neg_sample_ratio, all positives are kept and the remainder is
+    filled with random negatives; otherwise neg_sample_ratio positives and neg_sample_ratio negatives are drawn.
+
+    Parameters
+    ----------
+    train_set
+        Indexed triples for the training.
+    num_entities
+        Number of entities.
+    num_relations
+        Number of relations.
+    neg_sample_ratio
+        Number of negative (and subsampled positive) tails per (h, r) pair.
+    label_smoothing_rate
+        Smoothing applied to the binary labels.
+
+    Returns
+    -------
+    torch.utils.data.Dataset
+    """
+
+    def __init__(self, train_set: np.ndarray, num_entities, num_relations, neg_sample_ratio: int = None,
+                 label_smoothing_rate: float = 0.0):
+        super().__init__()
+        assert isinstance(train_set, np.ndarray)
+        assert isinstance(neg_sample_ratio, int)
+        self.train_data = train_set
+        self.num_entities = num_entities
+        self.num_relations = num_relations
+        self.neg_sample_ratio = neg_sample_ratio
+        self.label_smoothing_rate = torch.tensor(label_smoothing_rate)
+        self.collate_fn = None
+
+        if self.neg_sample_ratio == 0:
+            print(f'neg_sample_ratio is {neg_sample_ratio}. It will be set to 10.')
+            self.neg_sample_ratio = 10
+
+        print('Constructing training data...')
+        store = mapping_from_first_two_cols_to_third(train_set)
+        self.train_data = torch.IntTensor(list(store.keys()))
+        # https://pytorch.org/docs/stable/data.html#multi-process-data-loading
+        # TL;DR: replace Python objects with non-refcounted representations such as Pandas, Numpy or PyArrow objects.
+        # Unsure whether a list of numpy arrays is non-refcounted.
+        self.train_target = list([np.array(i) for i in store.values()])
+        del store
+        # @TODO: Investigate reference counts of using a list of numpy arrays.
+        # import sys
+        # import gc
+        # print(sys.getrefcount(self.train_target))
+        # print(sys.getrefcount(self.train_target[0]))
+        # print(gc.get_referrers(self.train_target))
+        # print(gc.get_referrers(self.train_target[0]))
+
+    def __len__(self):
+        assert len(self.train_data) == len(self.train_target)
+        return len(self.train_data)
+
+    def __getitem__(self, idx):
+        # (1) Get the idx.th unique (head, relation) pair.
+        x = self.train_data[idx]
+        # (2) Get the tail entities given (1).
+        positives_idx = self.train_target[idx]
+        num_positives = len(positives_idx)
+        # (3) Do we need to subsample (2) to create training data points of the same size?
+        if num_positives < self.neg_sample_ratio:
+            # (3.1) Take all tail entities as positive examples.
+            positives_idx = torch.IntTensor(positives_idx)
+            # (3.2) Generate more negative entities to fill up to 2 * neg_sample_ratio data points.
+            negative_idx = torch.randint(low=0,
+                                         high=self.num_entities,
+                                         size=(self.neg_sample_ratio + self.neg_sample_ratio - num_positives,))
+        else:
+            # (3.1) Subsample positives without replacement.
+            positives_idx = torch.IntTensor(np.random.choice(positives_idx, size=self.neg_sample_ratio, replace=False))
+            # (3.2) Generate random entities.
+            negative_idx = torch.randint(low=0,
+                                         high=self.num_entities,
+                                         size=(self.neg_sample_ratio,))
+        # (4) Create the selected indexes.
+        y_idx = torch.cat((positives_idx, negative_idx), 0)
+        # (5) Create binary labels.
+        y_vec = torch.cat((torch.ones(len(positives_idx)), torch.zeros(len(negative_idx))), 0)
+        return x, y_idx, y_vec
+ + + +
+class NegSampleDataset(torch.utils.data.Dataset):
+    def __init__(self, train_set: np.ndarray, num_entities: int, num_relations: int, neg_sample_ratio: int = 1):
+        assert isinstance(train_set, np.ndarray)
+        # https://pytorch.org/docs/stable/data.html#multi-process-data-loading
+        # TL;DR: replace Python objects with non-refcounted representations such as Pandas, Numpy or PyArrow objects.
+        self.neg_sample_ratio = torch.tensor(neg_sample_ratio)
+        self.train_set = torch.from_numpy(train_set).unsqueeze(1)
+        self.length = len(self.train_set)
+        self.num_entities = torch.tensor(num_entities)
+        self.num_relations = torch.tensor(num_relations)
+
+    def __len__(self):
+        return self.length
+
+    def __getitem__(self, idx):
+        # (1) Get a triple.
+        triple = self.train_set[idx]
+        # (2) Sample an entity.
+        corr_entities = torch.randint(0, high=self.num_entities, size=(1,))
+        # (3) Flip a coin.
+        if torch.rand(1) >= 0.5:
+            # (3.1) Corrupt (1) via its tail.
+            negative_triple = torch.cat((triple[:, 0], triple[:, 1], corr_entities), dim=0).unsqueeze(0)
+        else:
+            # (3.1) Corrupt (1) via its head.
+            negative_triple = torch.cat((corr_entities, triple[:, 1], triple[:, 2]), dim=0).unsqueeze(0)
+        # (4) Concatenate the positive and negative triples.
+        x = torch.cat((triple, negative_triple), dim=0)
+        # (5) Concatenate the labels of (4).
+        y = torch.tensor([1.0, 0.0])
+        return x, y
+ + + +
+class TriplePredictionDataset(torch.utils.data.Dataset):
+    """
+    Triple dataset:
+
+    D:= {(x)_i}_i ^N, where
+    . x: (h, r, t) \in KG is a triple with h, t \in E and r \in R, and
+    . collate_fn generates negative triples by corrupting the head or the tail of each positive triple.
+
+    Labels y are represented as floats: 1.0 for a positive triple, label_smoothing_rate for a corrupted one.
+
+    Parameters
+    ----------
+    train_set
+        Indexed triples for the training.
+    num_entities
+        Number of entities.
+    num_relations
+        Number of relations.
+    neg_sample_ratio
+        Number of negative triples generated per positive triple.
+    label_smoothing_rate
+        Smoothing applied to the binary labels.
+
+    Returns
+    -------
+    torch.utils.data.Dataset
+    """
+
+    @timeit
+    def __init__(self, train_set: np.ndarray, num_entities: int, num_relations: int, neg_sample_ratio: int = 1,
+                 label_smoothing_rate: float = 0.0):
+        assert isinstance(train_set, np.ndarray)
+        # https://pytorch.org/docs/stable/data.html#multi-process-data-loading
+        # TL;DR: replace Python objects with non-refcounted representations such as Pandas, Numpy or PyArrow objects.
+        self.label_smoothing_rate = torch.tensor(label_smoothing_rate)
+        # 0 implies that we do not add negative samples. This is needed during testing and validation.
+        self.neg_sample_ratio = torch.tensor(neg_sample_ratio)
+        self.train_set = torch.from_numpy(train_set)
+        assert num_entities >= max(self.train_set[:, 0]) and num_entities >= max(self.train_set[:, 2])
+        self.length = len(self.train_set)
+        self.num_entities = torch.tensor(num_entities)
+        self.num_relations = torch.tensor(num_relations)
+
+    def __len__(self):
+        return self.length
+
+    def __getitem__(self, idx):
+        return self.train_set[idx]
+
+[docs] + def collate_fn(self, batch: List[torch.Tensor]): + batch = torch.stack(batch, dim=0) + h, r, t = batch[:, 0], batch[:, 1], batch[:, 2] + size_of_batch, _ = batch.shape + assert size_of_batch > 0 + label = torch.ones((size_of_batch,)) - self.label_smoothing_rate + corr_entities = torch.randint(0, high=self.num_entities, size=(size_of_batch * self.neg_sample_ratio,)) + if torch.rand(1) >= 0.5: + # corrupt head + r_head_corr = r.repeat(self.neg_sample_ratio, ) + t_head_corr = t.repeat(self.neg_sample_ratio, ) + label_head_corr = torch.zeros(len(t_head_corr)) + self.label_smoothing_rate + + h = torch.cat((h, corr_entities), 0) + r = torch.cat((r, r_head_corr), 0) + t = torch.cat((t, t_head_corr), 0) + x = torch.stack((h, r, t), dim=1) + label = torch.cat((label, label_head_corr), 0) + else: + # corrupt tail + h_tail_corr = h.repeat(self.neg_sample_ratio, ) + r_tail_corr = r.repeat(self.neg_sample_ratio, ) + label_tail_corr = torch.zeros(len(r_tail_corr)) + self.label_smoothing_rate + + h = torch.cat((h, h_tail_corr), 0) + r = torch.cat((r, r_tail_corr), 0) + t = torch.cat((t, corr_entities), 0) + x = torch.stack((h, r, t), dim=1) + label = torch.cat((label, label_tail_corr), 0) + + """ + # corrupt head, tail or rel ?! + # (1) Corrupted Entities: + corr = torch.randint(0, high=self.num_entities, size=(size_of_batch * self.neg_sample_ratio, 2)) + # (2) Head Corrupt: + h_head_corr = corr[:, 0] + r_head_corr = r.repeat(self.neg_sample_ratio, ) + t_head_corr = t.repeat(self.neg_sample_ratio, ) + label_head_corr = torch.zeros(len(t_head_corr)) + self.label_smoothing_rate + # (3) Tail Corrupt: + h_tail_corr = h.repeat(self.neg_sample_ratio, ) + r_tail_corr = r.repeat(self.neg_sample_ratio, ) + t_tail_corr = corr[:, 1] + label_tail_corr = torch.zeros(len(t_tail_corr)) + self.label_smoothing_rate + # (4) Relations Corrupt: + h_rel_corr = h.repeat(self.neg_sample_ratio, ) + r_rel_corr = torch.randint(0, self.num_relations, (size_of_batch * self.neg_sample_ratio, 1))[:, 0] + t_rel_corr = t.repeat(self.neg_sample_ratio, ) + label_rel_corr = torch.zeros(len(t_rel_corr)) + self.label_smoothing_rate + # (5) Stack True and Corrupted Triples + h = torch.cat((h, h_head_corr, h_tail_corr, h_rel_corr), 0) + r = torch.cat((r, r_head_corr, r_tail_corr, r_rel_corr), 0) + t = torch.cat((t, t_head_corr, t_tail_corr, t_rel_corr), 0) + x = torch.stack((h, r, t), dim=1) + label = torch.cat((label, label_head_corr, label_tail_corr, label_rel_corr), 0) + """ + return x, label
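With neg_sample_ratio = 1, a batch of two positives is extended to four rows: one corrupted copy per positive (either all heads or all tails replaced by random entities), each with label 0 plus smoothing. A toy walk-through:

    import numpy as np
    import torch

    ds = TriplePredictionDataset(train_set=np.array([[0, 0, 1], [1, 0, 2]]),
                                 num_entities=3, num_relations=1, neg_sample_ratio=1)
    x, label = ds.collate_fn([ds[0], ds[1]])
    print(x.shape, label)   # torch.Size([4, 3]) tensor([1., 1., 0., 0.])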
+
+ + + +
+class CVDataModule(pl.LightningDataModule):
+    """
+    Create a LightningDataModule for cross-validation.
+
+    Parameters
+    ----------
+    train_set_idx
+        Indexed triples for the training.
+    num_entities
+        Number of entities.
+    num_relations
+        Number of relations.
+    neg_sample_ratio
+        Number of negative triples per positive triple.
+    batch_size
+        Mini-batch size.
+    num_workers
+        Number of workers for https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
+
+    Returns
+    -------
+    pl.LightningDataModule
+    """
+
+    def __init__(self, train_set_idx: np.ndarray, num_entities, num_relations, neg_sample_ratio, batch_size,
+                 num_workers):
+        super().__init__()
+        assert isinstance(train_set_idx, np.ndarray)
+        self.train_set_idx = train_set_idx
+        self.num_entities = num_entities
+        self.num_relations = num_relations
+        self.neg_sample_ratio = neg_sample_ratio
+        self.batch_size = batch_size
+        self.num_workers = num_workers
+
+[docs] + def train_dataloader(self) -> DataLoader: + train_set = TriplePredictionDataset(self.train_set_idx, + num_entities=self.num_entities, + num_relations=self.num_relations, + neg_sample_ratio=self.neg_sample_ratio) + return DataLoader(train_set, batch_size=self.batch_size, + shuffle=True, + num_workers=self.num_workers, + collate_fn=train_set.collate_fn)
+ + +
+[docs] + def setup(self, *args, **kwargs): + pass
+ + +
+[docs] + def transfer_batch_to_device(self, *args, **kwargs): + pass
+ + +
+[docs] + def prepare_data(self, *args, **kwargs): + # Nothing to be prepared for now. + pass
+
\ No newline at end of file
diff --git a/_modules/dicee/evaluator.html b/_modules/dicee/evaluator.html
new file mode 100644
index 00000000..e416f09d
--- /dev/null
+++ b/_modules/dicee/evaluator.html
@@ -0,0 +1,551 @@
Source code for dicee.evaluator

+import torch
+import numpy as np
+import json
+from .static_funcs import pickle
+from .static_funcs_training import evaluate_lp
+from typing import Tuple
+
+
+class Evaluator:
+    """
+    Evaluator class to evaluate KGE models in various downstream tasks.
+
+    Arguments
+    ----------
+    args
+        Configuration object holding, e.g., the evaluation mode and storage paths.
+    is_continual_training
+        Whether the model is evaluated in a continual-training setting.
+    """
+
+    def __init__(self, args, is_continual_training=None):
+        self.re_vocab = None
+        self.er_vocab = None
+        self.ee_vocab = None
+        self.is_continual_training = is_continual_training
+        self.num_entities = None
+        self.num_relations = None
+        self.domain_constraints_per_rel, self.range_constraints_per_rel = None, None
+        self.args = args
+        self.report = dict()
+        self.during_training = False
+
+    def vocab_preparation(self, dataset) -> None:
+        """
+        Wait on the (possibly future) vocabulary objects of the dataset and set them as attributes.
+
+        Arguments
+        ----------
+        dataset
+            Dataset whose vocabularies (er_vocab, re_vocab, ee_vocab, constraints) are collected.
+
+        Return
+        ----------
+        None
+        """
+        if isinstance(dataset.er_vocab, dict):
+            self.er_vocab = dataset.er_vocab
+        else:
+            self.er_vocab = dataset.er_vocab.result()
+
+        if isinstance(dataset.re_vocab, dict):
+            self.re_vocab = dataset.re_vocab
+        else:
+            self.re_vocab = dataset.re_vocab.result()
+
+        if isinstance(dataset.ee_vocab, dict):
+            self.ee_vocab = dataset.ee_vocab
+        else:
+            self.ee_vocab = dataset.ee_vocab.result()
+
+        if isinstance(dataset.constraints, tuple):
+            self.domain_constraints_per_rel, self.range_constraints_per_rel = dataset.constraints
+        else:
+            try:
+                self.domain_constraints_per_rel, self.range_constraints_per_rel = dataset.constraints.result()
+            except RuntimeError:
+                print('Domain constraint exception occurred')
+
+        self.num_entities = dataset.num_entities
+        self.num_relations = dataset.num_relations
+
+        pickle.dump(self.er_vocab, open(self.args.full_storage_path + "/er_vocab.p", "wb"))
+        pickle.dump(self.re_vocab, open(self.args.full_storage_path + "/re_vocab.p", "wb"))
+        pickle.dump(self.ee_vocab, open(self.args.full_storage_path + "/ee_vocab.p", "wb"))
+ + + # @timeit +
+[docs] + def eval(self, dataset, trained_model, form_of_labelling, during_training=False) -> None: + # @TODO: Why this reassigment ? + self.during_training = during_training + # (1) Exit, if the flag is not set + if self.args.eval_model is None: + return + self.vocab_preparation(dataset) + if self.args.num_folds_for_cv > 1: + return + if isinstance(self.args.eval_model, bool): + print('Wrong input:RESET') + self.args.eval_model = 'train_val_test' + + if self.args.scoring_technique == 'NegSample': + self.eval_rank_of_head_and_tail_entity(train_set=dataset.train_set, + valid_set=dataset.valid_set, + test_set=dataset.test_set, + trained_model=trained_model) + elif self.args.scoring_technique in ["AllvsAll", "KvsAll", 'KvsSample', "1vsAll"]: + self.eval_with_vs_all(train_set=dataset.train_set, + valid_set=dataset.valid_set, + test_set=dataset.test_set, + trained_model=trained_model, + form_of_labelling=form_of_labelling) + else: + raise ValueError(f'Invalid argument: {self.args.scoring_technique}') + if self.during_training is False: + with open(self.args.full_storage_path + '/eval_report.json', 'w') as file_descriptor: + json.dump(self.report, file_descriptor, indent=4) + return {k: v for k, v in self.report.items()}
+ + + def __load_indexed_datasets(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + if 'train' in self.args.eval_model: + train_set = np.load(self.args.full_storage_path + "/train_set.npy") + else: + train_set = None + if 'val' in self.args.eval_model: + valid_set = np.load(self.args.full_storage_path + "/valid_set.npy") + else: + valid_set = None + + if 'test' in self.args.eval_model: + test_set = np.load(self.args.full_storage_path + "/test_set.npy") + else: + test_set = None + return train_set, valid_set, test_set + + def __load_and_set_mappings(self): + self.er_vocab = pickle.load(open(self.args.full_storage_path + "/er_vocab.p", "rb")) + self.re_vocab = pickle.load(open(self.args.full_storage_path + "/re_vocab.p", "rb")) + self.ee_vocab = pickle.load(open(self.args.full_storage_path + "/ee_vocab.p", "rb")) + +
+[docs] + def dummy_eval(self, trained_model, form_of_labelling: str): + + if self.is_continual_training: + self.__load_and_set_mappings() + + train_set, valid_set, test_set = self.__load_indexed_datasets() + + if self.args.scoring_technique == 'NegSample': + self.eval_rank_of_head_and_tail_entity(train_set=train_set, + valid_set=valid_set, + test_set=test_set, + trained_model=trained_model) + elif self.args.scoring_technique in ['KvsAll', 'KvsSample', '1vsAll', 'PvsAll', 'CCvsAll']: + self.eval_with_vs_all(train_set=train_set, + valid_set=valid_set, + test_set=test_set, + trained_model=trained_model, form_of_labelling=form_of_labelling) + else: + raise ValueError(f'Invalid argument: {self.args.scoring_technique}') + with open(self.args.full_storage_path + '/eval_report.json', 'w') as file_descriptor: + json.dump(self.report, file_descriptor, indent=4)
+ + +
+[docs]
+    def eval_rank_of_head_and_tail_entity(self, *, train_set, valid_set=None, test_set=None, trained_model):
+        # (1) Evaluate the model on the training set if requested.
+        if 'train' in self.args.eval_model:
+            res = self.evaluate_lp(trained_model, train_set,
+                                   f'Evaluate {trained_model.name} on Train set')
+            self.report['Train'] = res
+        # (2) Evaluate the model on the validation and test sets if requested.
+        if 'val' in self.args.eval_model:
+            if valid_set is not None:
+                self.report['Val'] = self.evaluate_lp(trained_model, valid_set,
+                                                      f'Evaluate {trained_model.name} on Validation set')
+
+        if test_set is not None and 'test' in self.args.eval_model:
+            self.report['Test'] = self.evaluate_lp(trained_model, test_set,
+                                                   f'Evaluate {trained_model.name} on Test set')
+ + +
+[docs] + def eval_with_vs_all(self, *, train_set, valid_set=None, test_set=None, trained_model, form_of_labelling) -> None: + """ Evaluate model after reciprocal triples are added """ + if 'train' in self.args.eval_model: + res = self.evaluate_lp_k_vs_all(trained_model, train_set, + info=f'Evaluate {trained_model.name} on Train set', + form_of_labelling=form_of_labelling) + self.report['Train'] = res + + if 'val' in self.args.eval_model: + if valid_set is not None: + res = self.evaluate_lp_k_vs_all(trained_model, valid_set, + f'Evaluate {trained_model.name} on Validation set', + form_of_labelling=form_of_labelling) + self.report['Val'] = res + + if test_set is not None and 'test' in self.args.eval_model: + res = self.evaluate_lp_k_vs_all(trained_model, test_set, + f'Evaluate {trained_model.name} on Test set', + form_of_labelling=form_of_labelling) + self.report['Test'] = res
+ + +
+[docs]
+    def evaluate_lp_k_vs_all(self, model, triple_idx, info=None, form_of_labelling=None):
+        """
+        Filtered link prediction evaluation.
+        :param model:
+        :param triple_idx: test triples
+        :param info:
+        :param form_of_labelling:
+        :return:
+        """
+        # (1) Set the model to evaluation mode.
+        model.eval()
+        num_triples = len(triple_idx)
+        ranks = []
+        # Hit range
+        hits_range = [i for i in range(1, 11)]
+        hits = {i: [] for i in hits_range}
+        if info and self.during_training is False:
+            print(info + ':', end=' ')
+        if form_of_labelling == 'RelationPrediction':
+            # Iterate over integer-indexed triples in mini-batch fashion.
+            for i in range(0, num_triples, self.args.batch_size):
+                data_batch = triple_idx[i:i + self.args.batch_size]
+                e1_idx_e2_idx, r_idx = torch.LongTensor(data_batch[:, [0, 2]]), torch.LongTensor(data_batch[:, 1])
+                # Generate predictions
+                predictions = model.forward_k_vs_all(x=e1_idx_e2_idx)
+                # Filter all relations except the target relation
+                for j in range(data_batch.shape[0]):
+                    filt = self.ee_vocab[(data_batch[j][0], data_batch[j][2])]
+                    target_value = predictions[j, r_idx[j]].item()
+                    predictions[j, filt] = -np.Inf
+                    predictions[j, r_idx[j]] = target_value
+                # Sort predictions.
+                sort_values, sort_idxs = torch.sort(predictions, dim=1, descending=True)
+                # This could also be done in parallel.
+                for j in range(data_batch.shape[0]):
+                    rank = torch.where(sort_idxs[j] == r_idx[j])[0].item() + 1
+                    ranks.append(rank)
+                    for hits_level in hits_range:
+                        if rank <= hits_level:
+                            hits[hits_level].append(1.0)
+        else:
+            # TODO: Why don't we use a PyTorch Dataset here for multiprocessing?
+            # Iterate over integer-indexed triples in mini-batch fashion.
+            for i in range(0, num_triples, self.args.batch_size):
+                # (1) Get a batch of data.
+                data_batch = triple_idx[i:i + self.args.batch_size]
+                # (2) Extract entities and relations.
+                e1_idx_r_idx, e2_idx = torch.LongTensor(data_batch[:, [0, 1]]), torch.tensor(data_batch[:, 2])
+                # (3) Predict missing entities, i.e., assign probs to all entities.
+                with torch.no_grad():
+                    predictions = model(e1_idx_r_idx)
+                # (4) Filter all entities except the target entity
+                for j in range(data_batch.shape[0]):
+                    # (4.1) Get the ids of the head entity, the relation and the target tail entity in the j-th triple.
+                    id_e, id_r, id_e_target = data_batch[j]
+                    # (4.2) Get the ids of all entities occurring with the head entity and relation extracted in (4.1).
+                    filt = self.er_vocab[(id_e, id_r)]
+                    # (4.3) Store the score assigned to the target tail entity extracted in (4.1).
+                    target_value = predictions[j, id_e_target].item()
+                    # (4.4.1) Filter all assigned scores for entities.
+                    predictions[j, filt] = -np.Inf
+                    # (4.4.2) Filter entities based on the range of the relation as well.
+                    if 'constraint' in self.args.eval_model:
+                        predictions[j, self.range_constraints_per_rel[data_batch[j, 1]]] = -np.Inf
+                    # (4.5) Reinsert (4.3) after filtering.
+                    predictions[j, id_e_target] = target_value
+                # (5) Sort predictions.
+                sort_values, sort_idxs = torch.sort(predictions, dim=1, descending=True)
+                # (6) Compute the filtered ranks.
+                for j in range(data_batch.shape[0]):
+                    # Rank is between 1 and the number of entities.
+                    rank = torch.where(sort_idxs[j] == e2_idx[j])[0].item() + 1
+                    ranks.append(rank)
+                    for hits_level in hits_range:
+                        if rank <= hits_level:
+                            hits[hits_level].append(1.0)
+        # (7) Sanity checking: exactly one rank per triple.
+        assert len(triple_idx) == len(ranks) == num_triples
+        hit_1 = sum(hits[1]) / num_triples
+        hit_3 = sum(hits[3]) / num_triples
+        hit_10 = sum(hits[10]) / num_triples
+        mean_reciprocal_rank = np.mean(1. / np.array(ranks))
+
+        results = {'H@1': hit_1, 'H@3': hit_3, 'H@10': hit_10, 'MRR': mean_reciprocal_rank}
+        if info and self.during_training is False:
+            print(info)
+            print(results)
+        return results
+ + +
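The tail of evaluate_lp_k_vs_all reduces the collected filtered ranks to the reported metrics. A minimal self-contained sketch of that reduction; the rank list here is made up purely for illustration:

import numpy as np

ranks = [1, 3, 2, 15, 1]                      # hypothetical filtered ranks, one per test triple
mrr = float(np.mean(1.0 / np.array(ranks)))   # MRR = mean of reciprocal ranks -> 0.58
hits_at = {k: float(np.mean([r <= k for r in ranks])) for k in (1, 3, 10)}
print({'H@1': hits_at[1], 'H@3': hits_at[3], 'H@10': hits_at[10], 'MRR': mrr})
# {'H@1': 0.4, 'H@3': 0.8, 'H@10': 0.8, 'MRR': 0.58}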
+[docs]
+    def evaluate_lp(self, model, triple_idx, info):
+        """
+        Filtered link prediction evaluation for the negative-sampling setting;
+        delegates to the standalone evaluate_lp helper.
+        """
+        return evaluate_lp(model, triple_idx, num_entities=self.num_entities,
+                           er_vocab=self.er_vocab, re_vocab=self.re_vocab, info=info)
+ + +
+[docs]
+    def eval_with_data(self, dataset, trained_model, triple_idx: np.ndarray, form_of_labelling: str):
+        """ Evaluate a trained model on a given dataset. """
+        self.vocab_preparation(dataset)
+        if self.args.scoring_technique == 'NegSample':
+            return self.evaluate_lp(trained_model, triple_idx,
+                                    info=f'Evaluate {trained_model.name} on a given dataset')
+        elif self.args.scoring_technique in ['KvsAll', 'KvsSample', '1vsAll', 'PvsAll', 'CCvsAll',
+                                             'BatchRelaxedKvsAll', 'BatchRelaxed1vsAll']:
+            return self.evaluate_lp_k_vs_all(trained_model, triple_idx,
+                                             info=f'Evaluate {trained_model.name} on a given dataset',
+                                             form_of_labelling=form_of_labelling)
+        else:
+            raise ValueError(f'Invalid argument: {self.args.scoring_technique}')
+
\ No newline at end of file
diff --git a/_modules/dicee/executer.html b/_modules/dicee/executer.html new file mode 100644 index 00000000..b199ba5c --- /dev/null +++ b/_modules/dicee/executer.html @@ -0,0 +1,595 @@

Source code for dicee.executer

+import json
+import logging
+import time
+import warnings
+from types import SimpleNamespace
+import os
+import datetime
+import argparse
+from pytorch_lightning import seed_everything
+
+from dicee.knowledge_graph import KG
+from dicee.evaluator import Evaluator
+# Avoid
+from dicee.static_preprocess_funcs import preprocesses_input_args
+from dicee.trainer import DICE_Trainer
+import pytorch_lightning as pl
+
+from dicee.static_funcs import timeit, continual_training_setup_executor, read_or_load_kg, load_json, store
+from dicee.sanity_checkers import config_kge_sanity_checking
+
+logging.getLogger('pytorch_lightning').setLevel(0)
+warnings.filterwarnings(action="ignore", category=DeprecationWarning)
+os.environ["TORCH_DISTRIBUTED_DEBUG"] = "INFO"
+
+
+
+[docs]
+class Execute:
+    """ A class for training, retraining and evaluating a model.
+
+    (1) Loading & Preprocessing & Serializing input data.
+    (2) Training & Validation & Testing.
+    (3) Storing all necessary info.
+    """
+
+    def __init__(self, args, continuous_training=False):
+        # (1) Process arguments and sanity checking.
+        self.args = preprocesses_input_args(args)
+        # (2) Ensure reproducibility.
+        seed_everything(args.random_seed, workers=True)
+        # (3) Set the continual training flag.
+        self.is_continual_training = continuous_training
+        # (4) Create an experiment folder or use the previous one.
+        continual_training_setup_executor(self)
+        # (5) A variable initialized for the pytorch lightning trainer or DICE_Trainer().
+        self.trainer = None
+        self.trained_model = None
+        # (6) A variable initialized for storing input data.
+        self.dataset = None
+        # (7) Keep a few numerical results in memory, e.g. runtime, H@1 etc.
+        self.report = dict()
+        # (8) Create an object to carry out link prediction evaluations.
+        self.evaluator = None  # e.g. Evaluator(self)
+        # (9) Execution start time.
+        self.start_time = None
+[docs] + def read_preprocess_index_serialize_data(self) -> None: + """ Read & Preprocess & Index & Serialize Input Data + + (1) Read or load the data from disk into memory. + (2) Store the statistics of the data. + + Parameter + ---------- + + Return + ---------- + None + + """ + # (1) Read & Preprocess & Index & Serialize Input Data. + self.dataset = read_or_load_kg(self.args, cls=KG) + # (2) Sanity checking. + self.args, self.dataset = config_kge_sanity_checking(self.args, self.dataset) + # (3) Store the stats + self.args.num_entities = self.dataset.num_entities + self.args.num_relations = self.dataset.num_relations + self.report['num_train_triples'] = len(self.dataset.train_set) + self.report['num_entities'] = self.dataset.num_entities + self.report['num_relations'] = self.dataset.num_relations + self.report['runtime_kg_loading'] = time.time() - self.start_time
+ + +
+[docs] + def load_indexed_data(self) -> None: + """ Load the indexed data from disk into memory + + Parameter + ---------- + + Return + ---------- + None + + """ + self.dataset = read_or_load_kg(self.args, cls=KG)
+ + +
+[docs]
+    @timeit
+    def save_trained_model(self) -> None:
+        """ Save a knowledge graph embedding model.
+
+        (1) Send the model to eval mode and cpu.
+        (2) Store the memory footprint of the model.
+        (3) Save the model to disk.
+        (4) Update the stats of the KG in the report.
+
+        Parameter
+        ---------
+
+        Return
+        ------
+        None
+        """
+        print('*** Save Trained Model ***')
+        # (1) Send the model to eval mode and cpu.
+        self.trained_model.eval()
+        self.trained_model.to('cpu')
+        # (2) Store NumParam and EstimatedSizeMB.
+        self.report.update(self.trained_model.mem_of_model())
+        # (3) Store/Serialize the model for further use.
+        if self.is_continual_training is False:
+            store(trainer=self.trainer,
+                  trained_model=self.trained_model,
+                  model_name='model',
+                  full_storage_path=self.storage_path,
+                  save_embeddings_as_csv=self.args.save_embeddings_as_csv)
+        else:
+            store(trainer=self.trainer,
+                  trained_model=self.trained_model,
+                  model_name='model_' + str(datetime.datetime.now()),
+                  full_storage_path=self.storage_path,
+                  save_embeddings_as_csv=self.args.save_embeddings_as_csv)
+        # (4) Update the stats of the KG in the report.
+        self.report['path_experiment_folder'] = self.storage_path
+        self.report['num_entities'] = self.args.num_entities
+        self.report['num_relations'] = self.args.num_relations
+ + +
+[docs]
+    def end(self, form_of_labelling: str) -> dict:
+        """
+        End training.
+
+        (1) Store the trained model.
+        (2) Evaluate the model if required.
+        (3) Report runtimes.
+
+        Parameter
+        ---------
+
+        Returns
+        -------
+        A dict containing information about the training and/or evaluation.
+        """
+        # (1) Save the model.
+        self.save_trained_model()
+        # (2) Evaluate the model if required, write the report and return the results.
+        if self.args.eval_model is None:
+            self.write_report()
+            return {**self.report}
+        else:
+            self.evaluator.eval(dataset=self.dataset, trained_model=self.trained_model,
+                                form_of_labelling=form_of_labelling)
+            self.write_report()
+            return {**self.report, **self.evaluator.report}
+ + +
+[docs]
+    def write_report(self) -> None:
+        """ Write training-related information into a report.json file. """
+        # Report total runtime.
+        self.report['Runtime'] = time.time() - self.start_time
+        print(f"Total Runtime: {self.report['Runtime']:.3f} seconds")
+        with open(self.args.full_storage_path + '/report.json', 'w') as file_descriptor:
+            json.dump(self.report, file_descriptor, indent=4)
+ + +
+[docs] + def start(self) -> dict: + """ + Start training + + # (1) Loading the Data + # (2) Create an evaluator object. + # (3) Create a trainer object. + # (4) Start the training + + Parameter + --------- + + Returns + ------- + A dict containing information about the training and/or evaluation + + """ + self.start_time = time.time() + print(f"Start time:{datetime.datetime.now()}") + # (1) Loading the Data + # Load the indexed data from disk or read a raw data from disk. + self.load_indexed_data() if self.is_continual_training else self.read_preprocess_index_serialize_data() + # (2) Create an evaluator object. + self.evaluator = Evaluator(args=self.args) + # (3) Create a trainer object. + self.trainer = DICE_Trainer(args=self.args, + is_continual_training=self.is_continual_training, + storage_path=self.storage_path, + evaluator=self.evaluator) + # (4) Start the training + self.trained_model, form_of_labelling = self.trainer.start(dataset=self.dataset) + return self.end(form_of_labelling)
+
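A minimal end-to-end usage sketch for Execute. The dataset folder and argument values are assumptions, get_default_arguments([]) is used here only to obtain the defaults without reading the CLI, and the seed attribute is assumed to be filled in by preprocesses_input_args:

from dicee.executer import Execute, get_default_arguments

args = get_default_arguments([])      # parse the defaults, ignoring sys.argv
args.dataset_dir = 'KGs/UMLS'         # assumed local folder with train/valid/test splits
args.model = 'Keci'
args.num_epochs = 10
report = Execute(args).start()        # train, evaluate, and collect the report dict
print(report['Runtime'], report.get('Test'))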
+ + + +
+[docs] +class ContinuousExecute(Execute): + """ A subclass of Execute Class for retraining + + (1) Loading & Preprocessing & Serializing input data. + (2) Training & Validation & Testing + (3) Storing all necessary info + """ + + def __init__(self, args): + assert os.path.exists(args.path_experiment_folder) + assert os.path.isfile(args.path_experiment_folder + '/configuration.json') + # (1) Load Previous input configuration + previous_args = load_json(args.path_experiment_folder + '/configuration.json') + dargs = vars(args) + del args + for k in list(dargs.keys()): + if dargs[k] is None: + del dargs[k] + # (2) Update (1) with new input + previous_args.update(dargs) + try: + report = load_json(dargs['path_experiment_folder'] + '/report.json') + previous_args['num_entities'] = report['num_entities'] + previous_args['num_relations'] = report['num_relations'] + except AssertionError: + print("Couldn't find report.json.") + previous_args = SimpleNamespace(**previous_args) + previous_args.full_storage_path = previous_args.path_experiment_folder + print('ContinuousExecute starting...') + print(previous_args) + # TODO: can we remove continuous_training from Execute ? + super().__init__(previous_args, continuous_training=True) + +
+[docs]
+    def continual_start(self) -> dict:
+        """
+        Start continual training.
+
+        (1) Initialize the trainer.
+        (2) Start continual training.
+        (3) Store the trained model.
+        (4) Evaluate the model if required.
+
+        Parameter
+        ---------
+
+        Returns
+        -------
+        A dict containing information about the training and/or evaluation.
+        """
+        # (1) Initialize the trainer.
+        self.trainer = DICE_Trainer(args=self.args, is_continual_training=True,
+                                    storage_path=self.args.path_experiment_folder)
+        # (2) Start continual training.
+        self.trained_model, form_of_labelling = self.trainer.continual_start()
+        # (3) Store the trained model.
+        self.save_trained_model()
+        # (4) Evaluate the model.
+        if self.args.eval_model is None:
+            return self.report
+        else:
+            self.evaluator = Evaluator(args=self.args, is_continual_training=True)
+            self.evaluator.dummy_eval(self.trained_model, form_of_labelling)
+            return {**self.report, **self.evaluator.report}
+
+ + + +
+[docs]
+def get_default_arguments(description=None):
+    """ Extend the pytorch_lightning Trainer's arguments with ours """
+    parser = pl.Trainer.add_argparse_args(argparse.ArgumentParser(add_help=False))
+    # Default Trainer params: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#methods
+    parser.add_argument("--dataset_dir", type=str, default='KGs/UMLS',
+                        help="The path of a folder containing input data")
+    parser.add_argument("--save_embeddings_as_csv", type=bool, default=False,
+                        help='A flag for saving embeddings in a csv file.')
+    parser.add_argument("--storage_path", type=str, default='Experiments',
+                        help="Embeddings, model, and any other related data will be stored therein.")
+    parser.add_argument("--model", type=str,
+                        default="Keci",
+                        help="Available models: CMult, ConEx, ConvQ, ConvO, DistMult, QMult, OMult, "
+                             "Shallom, AConEx, ConEx, ComplEx, DistMult, TransE, CLf")
+    parser.add_argument('--p', type=int, default=0,
+                        help='P for Clifford Algebra')
+    parser.add_argument('--q', type=int, default=0,
+                        help='Q for Clifford Algebra')
+    parser.add_argument('--optim', type=str, default='Adam',
+                        help='[Adan, NAdam, Adam, SGD, Sls, AdamSLS]')
+    parser.add_argument('--embedding_dim', type=int, default=64,
+                        help='Number of dimensions for an embedding vector.')
+    parser.add_argument("--num_epochs", type=int, default=10, help='Number of epochs for training.')
+    parser.add_argument('--batch_size', type=int, default=1024, help='Mini-batch size')
+    parser.add_argument('--auto_batch_finder', type=bool, default=False,
+                        help='Find a batch size w.r.t. computational budgets')
+    parser.add_argument("--lr", type=float, default=0.1)
+    parser.add_argument('--callbacks', '--list', nargs='+', default=[],
+                        help='List of tuples representing a callback and values, e.g. [PPE, PPE10, PPE20, FPPE]')
+    parser.add_argument("--backend", type=str, default='pandas',
+                        help='Select [polars (separator: \t), modin (separator: \s+), pandas (separator: \s+)]')
+    parser.add_argument("--trainer", type=str, default='torchCPUTrainer',
+                        help='PL (pytorch lightning trainer), torchDDP (custom ddp), torchCPUTrainer (custom cpu only)')
+    parser.add_argument('--scoring_technique', default='KvsAll', help="KvsSample, 1vsAll, KvsAll, NegSample")
+    parser.add_argument('--neg_ratio', type=int, default=0,
+                        help='The number of negative triples generated per positive triple.')
+    parser.add_argument('--weight_decay', type=float, default=0.0, help='L2 penalty, e.g. 0.00001')
+    parser.add_argument('--input_dropout_rate', type=float, default=0.0)
+    parser.add_argument('--hidden_dropout_rate', type=float, default=0.0)
+    parser.add_argument("--feature_map_dropout_rate", type=float, default=0.0)
+    parser.add_argument("--normalization", type=str, default="None", help="[LayerNorm, BatchNorm1d, None]")
+    parser.add_argument("--init_param", type=str, default=None, help="[xavier_normal, None]")
+    parser.add_argument("--gradient_accumulation_steps", type=int, default=0,
+                        help="e.g. gradient_accumulation_steps=2 implies that gradients are accumulated "
+                             "at every second mini-batch")
+    parser.add_argument('--num_folds_for_cv', type=int, default=0,
+                        help='Number of folds in k-fold cross validation. '
+                             'If >2, the standard evaluation scenario is not applied.')
+    parser.add_argument("--eval_model", type=str, default="train_val_test",
+                        help='train, val, test, constraint; combine them any way you want, e.g. '
+                             'train_val, train_val_test, val_test, val_test_constraint')
+    parser.add_argument("--save_model_at_every_epoch", type=int, default=None,
+                        help='At every X number of epochs the model will be saved. If None, we save 4 times.')
+    parser.add_argument("--label_smoothing_rate", type=float, default=0.0, help='None for not using it.')
+    parser.add_argument("--kernel_size", type=int, default=3, help="Square kernel size for ConEx")
+    parser.add_argument("--num_of_output_channels", type=int, default=32,
+                        help="# of output channels in convolution")
+    parser.add_argument("--num_core", type=int, default=0,
+                        help='Number of cores to be used. 0 implies using a single CPU.')
+    parser.add_argument("--seed_for_computation", type=int, default=0,
+                        help='Seed for all, see pl seed_everything().')
+    parser.add_argument("--sample_triples_ratio", type=float, default=None, help='Sample input data.')
+    parser.add_argument("--read_only_few", type=int, default=None,
+                        help='Read only the first N triples. If 0, read all.')
+    if description is None:
+        return parser.parse_args()
+    return parser.parse_args(description)
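Because argparse accepts an explicit argument list, overrides can also be passed programmatically; a small hedged sketch using only the flags defined above:

args = get_default_arguments(['--model', 'ComplEx',
                              '--embedding_dim', '32',
                              '--scoring_technique', 'NegSample',
                              '--neg_ratio', '4'])
print(args.model, args.embedding_dim)  # ComplEx 32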
\ No newline at end of file
diff --git a/_modules/dicee/knowledge_graph.html b/_modules/dicee/knowledge_graph.html new file mode 100644 index 00000000..de0fbb27 --- /dev/null +++ b/_modules/dicee/knowledge_graph.html @@ -0,0 +1,313 @@

Source code for dicee.knowledge_graph

+from typing import List
+import numpy as np
+from .read_preprocess_save_load_kg import ReadFromDisk, PreprocessKG, LoadSaveToDisk
+import sys
+
+
+
+[docs]
+class KG:
+    """ Knowledge Graph """
+
+    def __init__(self, dataset_dir: str = None,
+                 add_noise_rate: float = None,
+                 sparql_endpoint: str = None,
+                 path_single_kg: str = None,
+                 path_for_deserialization: str = None,
+                 add_reciprical: bool = None, eval_model: str = None,
+                 read_only_few: int = None, sample_triples_ratio: float = None,
+                 path_for_serialization: str = None,
+                 entity_to_idx=None, relation_to_idx=None, backend=None):
+        """
+        :param dataset_dir: A path of a folder containing train.txt, valid.txt, test.txt
+        :param add_noise_rate: Add noisy triples into the training dataset, sized at x % of the training set
+        :param sparql_endpoint: An endpoint of a triple store
+        :param path_single_kg: The path of a single file containing the input knowledge graph
+        :param path_for_deserialization: A path of a folder containing previously parsed data
+        :param add_reciprical: A flag for applying the reciprocal data augmentation technique
+        :param eval_model: A flag indicating whether evaluation will be applied.
+            If no eval, then entity and relation mappings will be deleted to free memory.
+        :param read_only_few: Read only the first N triples
+        :param sample_triples_ratio: Ratio for sampling the input data
+        :param path_for_serialization: A path of a folder for storing the parsed data
+        """
+        self.sparql_endpoint = sparql_endpoint
+        self.add_noise_rate = add_noise_rate
+        self.num_entities = None
+        self.num_relations = None
+        self.dataset_dir = dataset_dir
+        self.path_single_kg = path_single_kg
+        self.path_for_deserialization = path_for_deserialization
+        self.add_reciprical = add_reciprical
+        self.eval_model = eval_model
+
+        self.read_only_few = read_only_few
+        self.sample_triples_ratio = sample_triples_ratio
+        self.path_for_serialization = path_for_serialization
+        # dicts of str to int
+        self.entity_to_idx = entity_to_idx
+        self.relation_to_idx = relation_to_idx
+        self.backend = 'pandas' if backend is None else backend
+        self.train_set, self.valid_set, self.test_set = None, None, None
+
+        if self.path_for_deserialization is None:
+            ReadFromDisk(kg=self).start()
+            PreprocessKG(kg=self).start()
+            LoadSaveToDisk(kg=self).save()
+        else:
+            LoadSaveToDisk(kg=self).load()
+
+        assert len(self.train_set) > 0
+        assert len(self.train_set[0]) > 0
+        assert isinstance(self.train_set, np.ndarray)
+        assert isinstance(self.train_set[0], np.ndarray)
+        self._describe()
+
+    def _describe(self) -> None:
+        self.description_of_input = f'\n------------------- Description of Dataset {self.dataset_dir} -------------------'
+        self.description_of_input += f'\nNumber of entities: {self.num_entities}' \
+                                     f'\nNumber of relations: {self.num_relations}' \
+                                     f'\nNumber of triples on train set: {len(self.train_set)}' \
+                                     f'\nNumber of triples on valid set: {len(self.valid_set) if self.valid_set is not None else 0}' \
+                                     f'\nNumber of triples on test set: {len(self.test_set) if self.test_set is not None else 0}\n'
+        self.description_of_input += f"Entity Index: {sys.getsizeof(self.entity_to_idx) / 1_000_000_000:.5f} GB\n"
+        self.description_of_input += f"Relation Index: {sys.getsizeof(self.relation_to_idx) / 1_000_000_000:.5f} GB\n"
+        self.description_of_input += f"Train set: {self.train_set.nbytes / 1_000_000_000:.5f} GB\n"
+
+    @property
+    def entities_str(self) -> List:
+        return list(self.entity_to_idx.keys())
+
+    @property
+    def relations_str(self) -> List:
+        return list(self.relation_to_idx.keys())
+ +
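A hedged construction sketch for KG; 'KGs/UMLS' is an assumed local folder containing the three split files:

from dicee.knowledge_graph import KG

kg = KG(dataset_dir='KGs/UMLS', add_reciprical=True, eval_model='train_val_test')
print(kg.num_entities, kg.num_relations)   # filled in during preprocessing
print(kg.description_of_input)             # human-readable dataset summary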
\ No newline at end of file
diff --git a/_modules/dicee/knowledge_graph_embeddings.html b/_modules/dicee/knowledge_graph_embeddings.html new file mode 100644 index 00000000..9267690b --- /dev/null +++ b/_modules/dicee/knowledge_graph_embeddings.html @@ -0,0 +1,1384 @@

Source code for dicee.knowledge_graph_embeddings

+import os.path
+from typing import List, Tuple, Set, Iterable, Dict, Union
+import torch
+from torch import optim
+from torch.utils.data import DataLoader
+from .abstracts import BaseInteractiveKGE
+from .dataset_classes import TriplePredictionDataset
+from .static_funcs import random_prediction, deploy_triple_prediction, deploy_tail_entity_prediction, \
+    deploy_relation_prediction, deploy_head_entity_prediction, load_pickle
+from .static_funcs_training import evaluate_lp
+from .static_preprocess_funcs import create_constraints
+import numpy as np
+import sys
+
+
+
+[docs] +class KGE(BaseInteractiveKGE): + """ Knowledge Graph Embedding Class for interactive usage of pre-trained models""" + + # @TODO: we can download the model if it is not present locally + def __init__(self, path, construct_ensemble=False, + model_name=None, + apply_semantic_constraint=False): + super().__init__(path=path, construct_ensemble=construct_ensemble, model_name=model_name) + # See https://numpy.org/doc/stable/reference/generated/numpy.memmap.html + # If file exists + if os.path.exists(path + '/train_set.npy'): + self.train_set = np.load(file=path + '/train_set.npy', mmap_mode='r') + + if apply_semantic_constraint: + (self.domain_constraints_per_rel, self.range_constraints_per_rel, + self.domain_per_rel, self.range_per_rel) = create_constraints(self.train_set) + + def __str__(self): + return "KGE | " + str(self.model) + +
+[docs]
+    def eval_lp_performance(self, dataset: List[Tuple[str, str, str]], filtered=True):
+        assert isinstance(dataset, list) and len(dataset) > 0
+
+        idx_dataset = np.array(
+            [(self.entity_to_idx[s], self.relation_to_idx[p], self.entity_to_idx[o]) for s, p, o in dataset])
+        if filtered:
+            return evaluate_lp(model=self.model, triple_idx=idx_dataset, num_entities=len(self.entity_to_idx),
+                               er_vocab=load_pickle(self.path + '/er_vocab.p'),
+                               re_vocab=load_pickle(self.path + '/re_vocab.p'))
+        else:
+            return evaluate_lp(model=self.model, triple_idx=idx_dataset, num_entities=len(self.entity_to_idx),
+                               er_vocab=None, re_vocab=None)
+ + +
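A usage sketch for eval_lp_performance; the experiment path and the triple labels are assumptions about a previously trained UMLS model:

pre_trained_kge = KGE(path='Experiments/2023-07-07 10:00:00')  # hypothetical experiment folder
triples = [('acquired_abnormality', 'location_of', 'experimental_model_of_disease')]
print(pre_trained_kge.eval_lp_performance(dataset=triples, filtered=True))
# e.g. {'H@1': ..., 'H@3': ..., 'H@10': ..., 'MRR': ...}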
+[docs]
+    def predict_missing_head_entity(self, relation: List[str], tail_entity: List[str]) -> torch.FloatTensor:
+        """
+        Given a relation and a tail entity, return scores for all candidate head entities.
+
+        argmax_{e \in E } f(e,r,t), where r \in R, t \in E.
+
+        Parameter
+        ---------
+        relation: List[str]
+
+            String representations of the selected relations.
+
+        tail_entity: List[str]
+
+            String representations of the selected entities.
+
+        Returns
+        -------
+        A tensor of scores, one per candidate head entity.
+        """
+        head_entity = torch.arange(0, len(self.entity_to_idx))
+        relation = torch.LongTensor([self.relation_to_idx[i] for i in relation])
+        tail_entity = torch.LongTensor([self.entity_to_idx[i] for i in tail_entity])
+        x = torch.stack((head_entity,
+                         relation.repeat(self.num_entities, ),
+                         tail_entity.repeat(self.num_entities, )), dim=1)
+        return self.model.forward(x)
+ + +
+[docs]
+    def predict_missing_relations(self, head_entity: List[str], tail_entity: List[str]) -> torch.FloatTensor:
+        """
+        Given a head entity and a tail entity, return scores for all candidate relations.
+
+        argmax_{r \in R } f(h,r,t), where h, t \in E.
+
+        Parameter
+        ---------
+        head_entity: List[str]
+
+            String representations of the selected head entities.
+
+        tail_entity: List[str]
+
+            String representations of the selected tail entities.
+
+        Returns
+        -------
+        A tensor of scores, one per candidate relation.
+        """
+        head_entity = torch.LongTensor([self.entity_to_idx[i] for i in head_entity])
+        relation = torch.arange(0, len(self.relation_to_idx))
+        tail_entity = torch.LongTensor([self.entity_to_idx[i] for i in tail_entity])
+
+        x = torch.stack((head_entity.repeat(self.num_relations, ),
+                         relation,
+                         tail_entity.repeat(self.num_relations, )), dim=1)
+        return self.model(x)
+ + # scores = self.model(x) + # sort_scores, sort_idxs = torch.topk(scores, topk) + # return sort_scores, [self.idx_to_relations[i] for i in sort_idxs.tolist()] + +
+[docs]
+    def predict_missing_tail_entity(self, head_entity: List[str], relation: List[str]) -> torch.FloatTensor:
+        """
+        Given a head entity and a relation, return scores for all candidate tail entities.
+
+        argmax_{e \in E } f(h,r,e), where h \in E and r \in R.
+
+        Parameter
+        ---------
+        head_entity: List[str]
+
+            String representations of the selected head entities.
+
+        relation: List[str]
+
+            String representations of the selected relations.
+
+        Returns
+        -------
+        A tensor of scores, one per candidate tail entity.
+        """
+        x = torch.cat((torch.LongTensor([self.entity_to_idx[i] for i in head_entity]).unsqueeze(-1),
+                       torch.LongTensor([self.relation_to_idx[i] for i in relation]).unsqueeze(-1)), dim=1)
+        return self.model.forward(x)
+ + +
+[docs] + def predict(self, *, h: List[str] = None, r: List[str] = None, t: List[str] = None): + # (1) Sanity checking. + if h is not None: + assert isinstance(h, list) + assert isinstance(h[0], str) + if r is not None: + assert isinstance(r, list) + assert isinstance(r[0], str) + if t is not None: + assert isinstance(t, list) + assert isinstance(t[0], str) + # (2) Predict missing head entity given a relation and a tail entity. + if h is None: + assert r is not None + assert t is not None + # ? r, t + scores = self.predict_missing_head_entity(r, t) + # (3) Predict missing relation given a head entity and a tail entity. + elif r is None: + assert h is not None + assert t is not None + # h ? t + scores = self.predict_missing_relations(h, t) + # (4) Predict missing tail entity given a head entity and a relation + elif t is None: + assert h is not None + assert r is not None + # h r ? + scores = self.predict_missing_tail_entity(h, r) + else: + assert len(h) == len(r) == len(t) + scores = self.triple_score(h, r, t) + return torch.sigmoid(scores)
+ + +
+[docs] + def predict_topk(self, *, h: List[str] = None, r: List[str] = None, t: List[str] = None, + topk: int = 10): + """ + Predict missing item in a given triple. + + + + Parameter + --------- + head_entity: List[str] + + String representation of selected entities. + + relation: List[str] + + String representation of selected relations. + + tail_entity: List[str] + + String representation of selected entities. + + + k: int + + Highest ranked k item. + + Returns: Tuple + --------- + + Highest K scores and items + """ + + # (1) Sanity checking. + if h is not None: + assert isinstance(h, list) + if r is not None: + assert isinstance(r, list) + if t is not None: + assert isinstance(t, list) + # (2) Predict missing head entity given a relation and a tail entity. + if h is None: + assert r is not None + assert t is not None + # ? r, t + scores = self.predict_missing_head_entity(r, t).flatten() + if self.apply_semantic_constraint: + # filter the scores + for th, i in enumerate(r): + scores[self.domain_constraints_per_rel[self.relation_to_idx[i]]] = -torch.inf + + sort_scores, sort_idxs = torch.topk(scores, topk) + return [(self.idx_to_entity[idx_top_entity], scores.item()) for idx_top_entity, scores in + zip(sort_idxs.tolist(), torch.sigmoid(sort_scores))] + + # (3) Predict missing relation given a head entity and a tail entity. + elif r is None: + assert h is not None + assert t is not None + # h ? t + scores = self.predict_missing_relations(h, t).flatten() + sort_scores, sort_idxs = torch.topk(scores, topk) + return [(self.idx_to_relations[idx_top_entity], scores.item()) for idx_top_entity, scores in + zip(sort_idxs.tolist(), torch.sigmoid(sort_scores))] + + # (4) Predict missing tail entity given a head entity and a relation + elif t is None: + assert h is not None + assert r is not None + # h r ?t + scores = self.predict_missing_tail_entity(h, r).flatten() + if self.apply_semantic_constraint: + # filter the scores + for th, i in enumerate(r): + scores[self.range_constraints_per_rel[self.relation_to_idx[i]]] = -torch.inf + sort_scores, sort_idxs = torch.topk(scores, topk) + return [(self.idx_to_entity[idx_top_entity], scores.item()) for idx_top_entity, scores in + zip(sort_idxs.tolist(), torch.sigmoid(sort_scores))] + else: + raise AttributeError('Use triple_score method')
+ + +
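A hedged sketch of the prediction entry points, continuing the pre_trained_kge example above; entity and relation labels are assumptions about the trained vocabulary:

# Sigmoid scores over all tail entities for (h, r, ?):
scores = pre_trained_kge.predict(h=['alga'], r=['isa'])
# The ten most plausible tails with their scores:
top_tails = pre_trained_kge.predict_topk(h=['alga'], r=['isa'], topk=10)
# Score of a fully specified triple:
p = pre_trained_kge.triple_score(h=['alga'], r=['isa'], t=['entity'])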
+[docs] + def triple_score(self, h: List[str] = None, r: List[str] = None, + t: List[str] = None, logits=False) -> torch.FloatTensor: + """ + Predict triple score + + Parameter + --------- + head_entity: List[str] + + String representation of selected entities. + + relation: List[str] + + String representation of selected relations. + + tail_entity: List[str] + + String representation of selected entities. + + logits: bool + + If logits is True, unnormalized score returned + + Returns: Tuple + --------- + + pytorch tensor of triple score + """ + h = torch.LongTensor([self.entity_to_idx[i] for i in h]).reshape(len(h), 1) + r = torch.LongTensor([self.relation_to_idx[i] for i in r]).reshape(len(r), 1) + t = torch.LongTensor([self.entity_to_idx[i] for i in t]).reshape(len(t), 1) + + x = torch.hstack((h, r, t)) + if self.apply_semantic_constraint: + raise NotImplementedError() + else: + with torch.no_grad(): + out = self.model(x) + if logits: + return out + else: + return torch.sigmoid(out)
+ + +
+[docs] + def t_norm(self, tens_1: torch.Tensor, tens_2: torch.Tensor, tnorm: str = 'min') -> torch.Tensor: + if 'min' in tnorm: + return torch.min(tens_1, tens_2) + elif 'prod' in tnorm: + return tens_1 * tens_2
+ + +
+[docs]
+    def tensor_t_norm(self, subquery_scores: torch.FloatTensor, tnorm: str = "min") -> torch.FloatTensor:
+        """
+        Compute a t-norm over [0,1]^{n \times d}, where n denotes the number of hops and d the number of entities.
+        """
+        if "min" == tnorm:
+            return torch.min(subquery_scores, dim=0).values
+        elif "prod" == tnorm:
+            # Take the last row of the cumulative product over subquery scores.
+            return torch.cumprod(subquery_scores, dim=0)[-1, :]
+        else:
+            raise NotImplementedError(f"{tnorm} is not implemented")
+ + +
+[docs] + def t_conorm(self, tens_1: torch.Tensor, tens_2: torch.Tensor, tconorm: str = 'min') -> torch.Tensor: + if 'min' in tconorm: + return torch.max(tens_1, tens_2) + elif 'prod' in tconorm: + return (tens_1 + tens_2) - (tens_1 * tens_2)
+ + +
+[docs] + def negnorm(self, tens_1: torch.Tensor, lambda_: float, neg_norm: str = 'standard') -> torch.Tensor: + if 'standard' in neg_norm: + return 1 - tens_1 + elif 'sugeno' in neg_norm: + return (1 - tens_1) / (1 + lambda_ * tens_1) + elif 'yager' in neg_norm: + return (1 - torch.pow(tens_1, lambda_)) ** (1 / lambda_)
+ + + def __single_hop_query_answering(self, query: Tuple[str, Tuple[str, ...]]): + head, relation = query + assert len(relation) == 1 + return self.predict(h=[head], r=[relation[0]]) + + def __return_answers_and_scores(self, query_score_of_all_entities, k: int): + query_score_of_all_entities = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), query_score_of_all_entities)] + return sorted(query_score_of_all_entities, key=lambda x: x[1], reverse=True)[:k] + +
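A tiny worked example of the fuzzy connectives above, continuing the pre_trained_kge example; the values are chosen purely for illustration:

import torch

a = torch.tensor([0.9, 0.2])
b = torch.tensor([0.5, 0.8])
pre_trained_kge.t_norm(a, b, 'prod')                        # a * b          -> tensor([0.4500, 0.1600])
pre_trained_kge.t_conorm(a, b, 'prod')                      # a + b - a * b  -> tensor([0.9500, 0.8400])
pre_trained_kge.negnorm(a, lambda_=0.0, neg_norm='standard')  # 1 - a        -> tensor([0.1000, 0.8000])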
+[docs] + def answer_multi_hop_query(self, query_type: str = None, query: Tuple[Union[str, Tuple[str, str]], ...] = None, + queries: List[Tuple[Union[str, Tuple[str, str]], ...]] = None, tnorm: str = "prod", + neg_norm: str = "standard", lambda_: float = 0.0, k: int = 10, only_scores=False) -> \ + List[Tuple[str, torch.Tensor]]: + """ + Find an answer set for EPFO queries including negation and disjunction + + Parameter + ---------- + query_type: str + The type of the query, e.g., "2p". + + query: Union[str, Tuple[str, Tuple[str, str]]] + The query itself, either a string or a nested tuple. + + queries: List of Tuple[Union[str, Tuple[str, str]], ...] + + tnorm: str + The t-norm operator. + + neg_norm: str + The negation norm. + + lambda_: float + lambda parameter for sugeno and yager negation norms + + k: int + The top-k substitutions for intermediate variables. + + Returns + ------- + List[Tuple[str, torch.Tensor]] + Entities and corresponding scores sorted in the descening order of scores + """ + + if queries is not None: + results = [] + for i in queries: + assert query is None + results.append( + self.answer_multi_hop_query(query_type=query_type, query=i, tnorm=tnorm, neg_norm=neg_norm, + lambda_=lambda_, k=k, only_scores=only_scores)) + return results + + assert len(self.entity_to_idx) >= k >= 0 + + query_name_dict = { + ("e", ("r",)): "1p", + ("e", ("r", "r")): "2p", + ("e", ("r", "r", "r",),): "3p", + (("e", ("r",)), ("e", ("r",))): "2i", + (("e", ("r",)), ("e", ("r",)), ("e", ("r",))): "3i", + ((("e", ("r",)), ("e", ("r",))), ("r",)): "ip", + (("e", ("r", "r")), ("e", ("r",))): "pi", + # negation + (("e", ("r",)), ("e", ("r", "n"))): "2in", + (("e", ("r",)), ("e", ("r",)), ("e", ("r", "n"))): "3in", + ((("e", ("r",)), ("e", ("r", "n"))), ("r",)): "inp", + (("e", ("r", "r")), ("e", ("r", "n"))): "pin", + (("e", ("r", "r", "n")), ("e", ("r",))): "pni", + + # union + (("e", ("r",)), ("e", ("r",)), ("u",)): "2u", + ((("e", ("r",)), ("e", ("r",)), ("u",)), ("r",)): "up", + + } + + # Create an inverse mapping + inverse_query_name_dict = {v: k for k, v in query_name_dict.items()} + + # Look up the corresponding query_structure + if query_type in inverse_query_name_dict: + query_structure = inverse_query_name_dict[query_type] + else: + raise ValueError(f"Invalid query type: {query_type}") + + # 1p + if query_structure == ("e", ("r",)): + atom1_scores = self.__single_hop_query_answering(query=query).squeeze() + if only_scores: + return atom1_scores + return self.__return_answers_and_scores(atom1_scores, k) + # 2p + elif query_structure == ("e", ("r", "r",)): + # ?M : \exist A. r1(e,A) \land r2(A,M) + head1, (relation1, relation2) = query + top_k_scores1 = [] + atom2_scores = [] + # (1) Iterate over top k substitutes of A in the first hop query: r1(e,A) s.t. 
A<-a + for top_k_entity, score_of_e_r1_a in self.answer_multi_hop_query(query_type="1p", + query=(head1, (relation1,)), + tnorm=tnorm, + k=k): + top_k_scores1.append(score_of_e_r1_a) + # () Scores for all entities E + atom2_scores.append(self.predict(h=[top_k_entity], r=[relation2])) + # k by E tensor + atom2_scores = torch.cat(atom2_scores, dim=0) + topk_scores1_expanded = torch.FloatTensor(top_k_scores1).view(-1, 1).repeat(1, atom2_scores.shape[1]) + query_scores, _ = torch.max(self.t_norm(topk_scores1_expanded, atom2_scores, tnorm), dim=0) + if only_scores: + return query_scores + else: + return self.__return_answers_and_scores(query_scores, k) + # 3p + elif query_structure == ("e", ("r", "r", "r",)): + # @TODO: explain the query and answering + head1, (relation1, relation2, relation3) = query + top_k_scores1 = [] + atom2_scores = [] + # (1) Iterate over top k substitutes of A in the first hop query: r1(e,A) s.t. A<-a + for top_k_entity, score_of_e_r1_a in self.answer_multi_hop_query(query_type="2p", + query=(head1, (relation1, relation2)), + tnorm=tnorm, + k=k): + top_k_scores1.append(score_of_e_r1_a) + # () Scores for all entities E + atom2_scores.append(self.predict(h=[top_k_entity], r=[relation3])) + # k by E tensor + atom2_scores = torch.cat(atom2_scores, dim=0) + topk_scores1_expanded = torch.FloatTensor(top_k_scores1).view(-1, 1).repeat(1, atom2_scores.shape[1]) + query_scores, _ = torch.max(self.t_norm(topk_scores1_expanded, atom2_scores, tnorm), dim=0) + if only_scores: + return query_scores + else: + return self.__return_answers_and_scores(query_scores, k) + # 2in + elif query_structure == (("e", ("r",)), ("e", ("r", "n"))): + # entity_scores = scores_2in(query, tnorm, neg_norm, lambda_) + head1, relation1 = query[0] + head2, relation2 = query[1] + + # Calculate entity scores for each query + # Get scores for the first atom (positive) + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom (negative) + # if neg_norm == "standard": + predictions = self.predict(h=[head2], r=[relation2[0]]).squeeze() + atom2_scores = self.negnorm(predictions, lambda_, neg_norm) + + assert len(atom1_scores) == len(self.entity_to_idx) + + combined_scores = self.t_norm(atom1_scores, atom2_scores, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # 3in + elif query_structure == (("e", ("r",)), ("e", ("r",)), ("e", ("r", "n"))): + # entity_scores = scores_3in(model, query, tnorm, neg_norm, lambda_) + head1, relation1 = query[0] + head2, relation2 = query[1] + head3, relation3 = query[2] + + # Calculate entity scores for each query + # Get scores for the first atom (positive) + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom (negative) + # modelling standard negation (1-x) + atom2_scores = self.predict(h=[head2], r=[relation2[0]]).squeeze() + # Get scores for the third atom + # if neg_norm == "standard": + predictions = self.predict(h=[head3], r=[relation3[0]]).squeeze() + atom3_scores = self.negnorm(predictions, lambda_, neg_norm) + + assert len(atom1_scores) == len(self.entity_to_idx) + + inter_scores = self.t_norm(atom1_scores, atom2_scores, tnorm) + combined_scores = self.t_norm(inter_scores, atom3_scores, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + 
return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # pni + elif query_structure == (("e", ("r", "r", "n")), ("e", ("r",))): + # entity_scores = scores_pni(model, query, tnorm, neg_norm, lambda_, k_) + head1, (relation1, relation2, _) = query[0] + head3, relation3 = query[1] + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + # sort atom1_scores in descending order and get the top k entities indices + top_k_scores1, top_k_indices = torch.topk(atom1_scores, k) + + # using model.entity_to_idx.keys() take the name of entities from topk heads 2 + entity_to_idx_keys = list(self.entity_to_idx.keys()) + top_k_heads = [entity_to_idx_keys[idx.item()] for idx in top_k_indices] + + # Get scores for the second atom + # Initialize an empty tensor + atom2_scores = torch.empty(0, len(self.entity_to_idx)).to(atom1_scores.device) + + # Get scores for the second atom + for head2 in top_k_heads: + # The score tensor for the current head2 + atom2_score = self.predict(h=[head2], r=[relation2]) + neg_atom2_score = self.negnorm(atom2_score, lambda_, neg_norm) + # Concatenate the score tensor for the current head2 with the previous scores + atom2_scores = torch.cat([atom2_scores, neg_atom2_score], dim=0) + + topk_scores1_expanded = top_k_scores1.view(-1, 1).repeat(1, atom2_scores.shape[1]) + + inter_scores = self.t_norm(topk_scores1_expanded, atom2_scores, tnorm) + + scores_2pn_query, _ = torch.max(inter_scores, dim=0) + scores_1p_query = self.predict(h=[head3], r=[relation3[0]]).squeeze() + + combined_scores = self.t_norm(scores_2pn_query, scores_1p_query, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # pin + elif query_structure == (("e", ("r", "r")), ("e", ("r", "n"))): + # entity_scores = scores_pin(model, query, tnorm, neg_norm, lambda_, k_) + head1, (relation1, relation2) = query[0] + head3, relation3 = query[1] + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + + # sort atom1_scores in descending order and get the top k entities indices + top_k_scores1, top_k_indices = torch.topk(atom1_scores, k) + + # using model.entity_to_idx.keys() take the name of entities from topk heads 2 + entity_to_idx_keys = list(self.entity_to_idx.keys()) + top_k_heads = [entity_to_idx_keys[idx.item()] for idx in top_k_indices] + + # Initialize an empty tensor + atom2_scores = torch.empty(0, len(self.entity_to_idx)).to(atom1_scores.device) + + # Get scores for the second atom + for head2 in top_k_heads: + # The score tensor for the current head2 + atom2_score = self.predict(h=[head2], r=[relation2]) + # Concatenate the score tensor for the current head2 with the previous scores + atom2_scores = torch.cat([atom2_scores, atom2_score], dim=0) + + topk_scores1_expanded = top_k_scores1.view(-1, 1).repeat(1, atom2_scores.shape[1]) + + inter_scores = self.t_norm(topk_scores1_expanded, atom2_scores, tnorm) + + scores_2p_query, _ = torch.max(inter_scores, dim=0) + + scores_1p_query = self.predict(h=[head3], r=[relation3[0]]).squeeze() + # taking negation for the e,(r,n) part of query + neg_scores_1p_query = self.negnorm(scores_1p_query, lambda_, neg_norm) + combined_scores = 
self.t_norm(scores_2p_query, neg_scores_1p_query, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # inp + elif query_structure == ((("e", ("r",)), ("e", ("r", "n"))), ("r",)): + # entity_scores = scores_inp(model, query, tnorm, neg_norm, lambda_, k_) + head1, relation1 = query[0][0] + head2, relation2 = query[0][1] + relation_1p = query[1] + + # Calculate entity scores for each query + # Get scores for the first atom (positive) + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom (negative) + # if neg_norm == "standard": + predictions = self.predict(h=[head2], r=[relation2[0]]).squeeze() + atom2_scores = self.negnorm(predictions, lambda_, neg_norm) + + assert len(atom1_scores) == len(self.entity_to_idx) + + scores_2in_query = self.t_norm(atom1_scores, atom2_scores, tnorm) + + # sort atom1_scores in descending order and get the top k entities indices + top_k_scores1, top_k_indices = torch.topk(scores_2in_query, k) + + # using model.entity_to_idx.keys() take the name of entities from topk heads 2 + entity_to_idx_keys = list(self.entity_to_idx.keys()) + top_k_heads = [entity_to_idx_keys[idx.item()] for idx in top_k_indices] + + # Get scores for the second atom + # Initialize an empty tensor + atom3_scores = torch.empty(0, len(self.entity_to_idx)).to(scores_2in_query.device) + + # Get scores for the second atom + for head3 in top_k_heads: + # The score tensor for the current head2 + atom3_score = self.predict(h=[head3], r=[relation_1p[0]]) + # Concatenate the score tensor for the current head2 with the previous scores + atom3_scores = torch.cat([atom3_scores, atom3_score], dim=0) + + topk_scores1_expanded = top_k_scores1.view(-1, 1).repeat(1, atom3_scores.shape[1]) + + combined_scores = self.t_norm(topk_scores1_expanded, atom3_scores, tnorm) + + res, _ = torch.max(combined_scores, dim=0) + if only_scores: + return res + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), res)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # 2i + elif query_structure == (("e", ("r",)), ("e", ("r",))): + # entity_scores = scores_2i(model, query, tnorm) + head1, relation1 = query[0] + head2, relation2 = query[1] + + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom + atom2_scores = self.predict(h=[head2], r=[relation2[0]]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + + combined_scores = self.t_norm(atom1_scores, atom2_scores, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # 3i + elif query_structure == (("e", ("r",)), ("e", ("r",)), ("e", ("r",))): + # entity_scores = scores_3i(model, query, tnorm) + head1, relation1 = query[0] + head2, relation2 = query[1] + head3, relation3 = query[2] + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom + atom2_scores = self.predict(h=[head2], r=[relation2[0]]).squeeze() + # Get scores for the third atom + atom3_scores = self.predict(h=[head3], r=[relation3[0]]).squeeze() + + assert len(atom1_scores) == 
len(self.entity_to_idx) + + inter_scores = self.t_norm(atom1_scores, atom2_scores, tnorm) + combined_scores = self.t_norm(inter_scores, atom3_scores, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # pi + elif query_structure == (("e", ("r", "r")), ("e", ("r",))): + # entity_scores = scores_pi(model, query, tnorm, k_) + head1, (relation1, relation2) = query[0] + head3, relation3 = query[1] + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + # sort atom1_scores in descending order and get the top k entities indices + top_k_scores1, top_k_indices = torch.topk(atom1_scores, k) + + # using model.entity_to_idx.keys() take the name of entities from topk heads 2 + entity_to_idx_keys = list(self.entity_to_idx.keys()) + top_k_heads = [entity_to_idx_keys[idx.item()] for idx in top_k_indices] + + # Initialize an empty tensor + atom2_scores = torch.empty(0, len(self.entity_to_idx)).to(atom1_scores.device) + + # Get scores for the second atom + for head2 in top_k_heads: + # The score tensor for the current head2 + atom2_score = self.predict(h=[head2], r=[relation2]) + # Concatenate the score tensor for the current head2 with the previous scores + atom2_scores = torch.cat([atom2_scores, atom2_score], dim=0) + + topk_scores1_expanded = top_k_scores1.view(-1, 1).repeat(1, atom2_scores.shape[1]) + + inter_scores = self.t_norm(topk_scores1_expanded, atom2_scores, tnorm) + + scores_2p_query, _ = torch.max(inter_scores, dim=0) + + scores_1p_query = self.predict(h=[head3], r=[relation3[0]]).squeeze() + + combined_scores = self.t_norm(scores_2p_query, scores_1p_query, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # ip + elif query_structure == ((("e", ("r",)), ("e", ("r",))), ("r",)): + # entity_scores = scores_ip(model, query, tnorm, k_) + head1, relation1 = query[0][0] + head2, relation2 = query[0][1] + relation_1p = query[1] + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom + atom2_scores = self.predict(h=[head2], r=[relation2[0]]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + + scores_2i_query = self.t_norm(atom1_scores, atom2_scores, tnorm) + # Get the top k entities from the 2i query + + # sort atom1_scores in descending order and get the top k entities indices + top_k_scores1, top_k_indices = torch.topk(scores_2i_query, k) + + # using model.entity_to_idx.keys() take the name of entities from topk heads + entity_to_idx_keys = list(self.entity_to_idx.keys()) + top_k_heads = [entity_to_idx_keys[idx.item()] for idx in top_k_indices] + + # Get scores for the second atom + # Initialize an empty tensor + atom3_scores = torch.empty(0, len(self.entity_to_idx)).to(scores_2i_query.device) + + # Get scores for the second atom + for head3 in top_k_heads: + # The score tensor for the current head2 + atom3_score = self.predict(h=[head3], r=[relation_1p[0]]) + # Concatenate the score tensor for the current head2 with the previous scores + atom3_scores = torch.cat([atom3_scores, atom3_score], dim=0) + + 
topk_scores1_expanded = top_k_scores1.view(-1, 1).repeat(1, atom3_scores.shape[1]) + + combined_scores = self.t_norm(topk_scores1_expanded, atom3_scores, tnorm) + res, _ = torch.max(combined_scores, dim=0) + if only_scores: + return res + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), res)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + # disjunction + # 2u + elif query_structure == (("e", ("r",)), ("e", ("r",)), ("u",)): + # entity_scores = scores_2u(model, query, tnorm) + head1, relation1 = query[0] + head2, relation2 = query[1] + + # Calculate entity scores for each query + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + # Get scores for the second atom + atom2_scores = self.predict(h=[head2], r=[relation2[0]]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + + combined_scores = self.t_conorm(atom1_scores, atom2_scores, tnorm) + if only_scores: + return combined_scores + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), combined_scores)] + entity_scores = sorted(entity_scores, key=lambda x: x[1], reverse=True) + + return entity_scores + # up + # here the second tnorm is for t-conorm (used in pairs) + elif query_structure == ((("e", ("r",)), ("e", ("r",)), ("u",)), ("r",)): + # entity_scores = scores_up(model, query, tnorm, tnorm, k_) + head1, relation1 = query[0][0] + head2, relation2 = query[0][1] + relation_1p = query[1] + + # Get scores for the first atom + atom1_scores = self.predict(h=[head1], r=[relation1[0]]).squeeze() + + # Get scores for the second atom + atom2_scores = self.predict(h=[head2], r=[relation2[0]]).squeeze() + + assert len(atom1_scores) == len(self.entity_to_idx) + + scores_2u_query = self.t_conorm(atom1_scores, atom2_scores, tnorm) + + # Sort atom1_scores in descending order and get the top k entities indices + top_k_scores1, top_k_indices = torch.topk(scores_2u_query, k) + + # Using model.entity_to_idx.keys() take the name of entities from topk heads + entity_to_idx_keys = list(self.entity_to_idx.keys()) + top_k_heads = [entity_to_idx_keys[idx.item()] for idx in top_k_indices] + + # Initialize an empty tensor + atom3_scores = torch.empty(0, len(self.entity_to_idx)).to(scores_2u_query.device) + + for head3 in top_k_heads: + # The score tensor for the current head3 + atom3_score = self.predict(h=[head3], r=[relation_1p[0]]) + + # Concatenate the score tensor for the current head3 with the previous scores + atom3_scores = torch.cat([atom3_scores, atom3_score], dim=0) + + topk_scores1_expanded = top_k_scores1.view(-1, 1).repeat(1, atom3_scores.shape[1]) + combined_scores = self.t_norm(topk_scores1_expanded, atom3_scores, tnorm) + res, _ = torch.max(combined_scores, dim=0) + if only_scores: + return res + entity_scores = [(ei, s) for ei, s in zip(self.entity_to_idx.keys(), res)] + return sorted(entity_scores, key=lambda x: x[1], reverse=True) + else: + raise RuntimeError(f"Incorrect query_structure {query_structure}")
+ + +
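A hedged invocation sketch for answer_multi_hop_query, continuing the pre_trained_kge example; the anchor entity and relations are assumptions:

# 2p query: entities M such that there exists A with r1(e, A) and r2(A, M).
answers = pre_trained_kge.answer_multi_hop_query(
    query_type='2p',
    query=('acquired_abnormality', ('location_of', 'part_of')),
    tnorm='prod', k=10)
for entity, score in answers[:3]:
    print(entity, float(score))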
+[docs] + def find_missing_triples(self, confidence: float, entities: List[str] = None, relations: List[str] = None, + topk: int = 10, + at_most: int = sys.maxsize) -> Set: + """ + Find missing triples + + Iterative over a set of entities E and a set of relation R : \forall e \in E and \forall r \in R f(e,r,x) + Return (e,r,x)\not\in G and f(e,r,x) > confidence + + Parameter + --------- + confidence: float + + A threshold for an output of a sigmoid function given a triple. + + topk: int + + Highest ranked k item to select triples with f(e,r,x) > confidence . + + at_most: int + + Stop after finding at_most missing triples + + Returns: Set + --------- + + {(e,r,x) | f(e,r,x) > confidence \land (e,r,x) \not\in G + """ + + assert 1.0 >= confidence >= 0.0 + assert topk >= 1 + + def select(items: List[str], item_mapping: Dict[str, int]) -> Iterable[Tuple[str, int]]: + """ + Get selected entities and their indexes + + Parameter + --------- + items: list + + item_mapping: dict + + + Returns: Iterable + --------- + + """ + + if items is None: + return item_mapping.items() + else: + return ((i, item_mapping[i]) for i in items) + + extended_triples = set() + print(f'Number of entities:{len(self.entity_to_idx)} \t Number of relations:{len(self.relation_to_idx)}') + + # (5) Cartesian Product over entities and relations + # (5.1) Iterate over entities + print('Finding missing triples..') + for str_head_entity, idx_entity in select(entities, self.entity_to_idx): + # (5.1) Iterate over relations + for str_relation, idx_relation in select(relations, self.relation_to_idx): + # (5.2) \forall e \in Entities store a tuple of scoring_func(head,relation,e) and e + # (5.3.) Sort (5.2) and return top tuples + predictions = self.predict_topk(h=[str_head_entity], r=[str_relation], topk=topk) + # (5.4) Iterate over 5.3 + for str_entity, predicted_score in predictions: + # (5.5) If score is less than 99% ignore it + if predicted_score < confidence: + break + else: + # (5.8) Remember it + extended_triples.add((str_head_entity, str_relation, str_entity)) + print(f'Number of found missing triples: {len(extended_triples)}') + if len(extended_triples) == at_most: + return extended_triples + # No need to store a large KG into memory + # /5.6) False if 0, otherwise 1 + is_in = np.any( + np.all(self.train_set == [idx_entity, idx_relation, self.entity_to_idx[str_entity]], + axis=1)) + # (5.7) If (5.6) is true, ignore it + if is_in: + continue + else: + # (5.8) Remember it + extended_triples.add((str_head_entity, str_relation, str_entity)) + print(f'Number of found missing triples: {len(extended_triples)}') + if len(extended_triples) == at_most: + return extended_triples + return extended_triples
+ + +
+[docs]
+    def deploy(self, share: bool = False, top_k: int = 10):
+        import gradio as gr
+
+        def predict(str_subject: str, str_predicate: str, str_object: str, random_examples: bool):
+            if random_examples:
+                return random_prediction(self)
+            else:
+                if self.is_seen(entity=str_subject) and self.is_seen(
+                        relation=str_predicate) and self.is_seen(entity=str_object):
+                    """ Triple Prediction """
+                    return deploy_triple_prediction(self, str_subject, str_predicate, str_object)
+                elif self.is_seen(entity=str_subject) and self.is_seen(
+                        relation=str_predicate):
+                    """ Tail Entity Prediction """
+                    return deploy_tail_entity_prediction(self, str_subject, str_predicate, top_k)
+                elif self.is_seen(entity=str_object) and self.is_seen(
+                        relation=str_predicate):
+                    """ Head Entity Prediction """
+                    return deploy_head_entity_prediction(self, str_object, str_predicate, top_k)
+                elif self.is_seen(entity=str_subject) and self.is_seen(entity=str_object):
+                    """ Relation Prediction """
+                    return deploy_relation_prediction(self, str_subject, str_object, top_k)
+                else:
+                    # If the user simply clicks submit without valid inputs, fall back to a random prediction.
+                    return random_prediction(self)
+
+        gr.Interface(
+            fn=predict,
+            inputs=[gr.inputs.Textbox(lines=1, placeholder=None, label='Subject'),
+                    gr.inputs.Textbox(lines=1, placeholder=None, label='Predicate'),
+                    gr.inputs.Textbox(lines=1, placeholder=None, label='Object'), "checkbox"],
+            outputs=[gr.outputs.Textbox(label='Input Triple'),
+                     gr.outputs.Dataframe(label='Outputs', type='pandas')],
+            title=f'{self.name} Deployment',
+            description='1. Enter a triple to compute its score,\n'
+                        '2. Enter a subject and predicate pair to obtain the most likely top ten entities, or\n'
+                        '3. Check the random examples box and click submit.').launch(share=share)
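Launching the interface is then a one-liner; note that the gr.inputs / gr.outputs namespaces used above exist only in pre-4.x Gradio releases. Reusing pre_trained_kge from the earlier sketch:

# Serves the model behind a small web UI; share=True would create a temporary public link.
pre_trained_kge.deploy(share=False, top_k=10)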
+ + + # @TODO: Do we really need this ?! +
+[docs] + def train_triples(self, h: List[str], r: List[str], t: List[str], labels: List[float], + iteration=2, optimizer=None): + assert len(h) == len(r) == len(t) == len(labels) + # (1) From List of strings to TorchLongTensor. + x = torch.LongTensor(self.index_triple(h, r, t)).reshape(1, 3) + # (2) From List of float to Torch Tensor. + labels = torch.FloatTensor(labels) + # (3) Train mode. + self.set_model_train_mode() + if optimizer is None: + optimizer = optim.Adam(self.model.parameters(), lr=0.1) + print('Iteration starts...') + # (4) Train. + for epoch in range(iteration): + optimizer.zero_grad() + outputs = self.model(x) + loss = self.model.loss(outputs, labels) + print(f"Iteration:{epoch}\t Loss:{loss.item()}\t Outputs:{outputs.detach().mean()}") + loss.backward() + optimizer.step() + # (5) Eval + self.set_model_eval_mode() + with torch.no_grad(): + outputs = self.model(x) + loss = self.model.loss(outputs, labels) + print(f"Eval Mode:\tLoss:{loss.item()}")
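A hedged single-triple usage sketch; the entity and relation names are placeholders, and note that the reshape(1, 3) above implies one triple per call:

pre_trained_kge.train_triples(h=['entity_a'], r=['relation_x'], t=['entity_b'],
                              labels=[1.0], iteration=5)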
+ + +
+[docs]
+    def train_k_vs_all(self, h, r, iteration=1, lr=.001):
+        """
+        Train in KvsAll mode against a single (head, relation) pair.
+        :param h: a list holding a single head entity.
+        :param r: a list holding a single relation.
+        :param iteration: number of gradient steps.
+        :param lr: learning rate.
+        :return:
+        """
+        assert len(h) == 1
+        # (1) Construct input and output.
+        out = self.construct_input_and_output_k_vs_all(h, r)
+        if out is None:
+            return
+        x, labels, idx_tails = out
+        # (2) Train mode.
+        self.set_model_train_mode()
+        # (3) Initialize the optimizer (note: SGD can be considerably faster than Adam here).
+        optimizer = optim.Adam(self.model.parameters(), lr=lr, weight_decay=.00001)
+
+        print('\nIteration starts.')
+        # (4) Iterative training.
+        for epoch in range(iteration):
+            optimizer.zero_grad()
+            outputs = self.model(x)
+            loss = self.model.loss(outputs, labels)
+            if len(idx_tails) > 0:
+                print(
+                    f"Iteration:{epoch}\t"
+                    f"Loss:{loss.item()}\t"
+                    f"Avg. Logits for correct tails: {outputs[0, idx_tails].flatten().mean().detach()}")
+            else:
+                print(
+                    f"Iteration:{epoch}\t"
+                    f"Loss:{loss.item()}\t"
+                    f"Avg. Logits for all negatives: {outputs[0].flatten().mean().detach()}")
+
+            loss.backward()
+            optimizer.step()
+            if loss.item() < .00001:
+                print(f'loss is {loss.item():.3f}. Converged !!!')
+                break
+        # (5) Eval mode.
+        self.set_model_eval_mode()
+        with torch.no_grad():
+            outputs = self.model(x)
+            loss = self.model.loss(outputs, labels)
+        print(f"Eval Mode:Loss:{loss.item():.4f}\t Outputs:{outputs[0, idx_tails].flatten().detach()}\n")
+ + +
+[docs]
+    def train(self, kg, lr=.1, epoch=10, batch_size=32, neg_sample_ratio=10, num_workers=1) -> None:
+        """ Retrain a pretrained model on an input KG via negative sampling."""
+        # (1) Create a negative-sampling dataset for training.
+        print('Creating Dataset...')
+        train_set = TriplePredictionDataset(kg.train_set,
+                                            num_entities=len(kg.entity_to_idx),
+                                            num_relations=len(kg.relation_to_idx),
+                                            neg_sample_ratio=neg_sample_ratio)
+        num_data_point = len(train_set)
+        print('Number of data points: ', num_data_point)
+        train_dataloader = DataLoader(train_set, batch_size=batch_size,
+                                      # shuffle => reshuffle the data at every epoch
+                                      shuffle=True, num_workers=num_workers,
+                                      collate_fn=train_set.collate_fn, pin_memory=True)
+
+        # (2) Go through valid triples + corrupted triples and compute scores.
+        # The average loss per triple is stored; it later indicates whether we learned something.
+        print('First Eval..')
+        self.set_model_eval_mode()
+        first_avg_loss_per_triple = 0
+        for x, y in train_dataloader:
+            pred = self.model(x)
+            first_avg_loss_per_triple += self.model.loss(pred, y)
+        first_avg_loss_per_triple /= num_data_point
+        print(f'First avg. loss per triple: {first_avg_loss_per_triple}')
+        # (3) Prepare the model for training.
+        self.set_model_train_mode()
+        optimizer = optim.Adam(self.model.parameters(), lr=lr)
+        print('Training Starts...')
+        for epoch_i in range(epoch):  # loop over the dataset multiple times
+            epoch_loss = 0
+            for x, y in train_dataloader:
+                # zero the parameter gradients
+                optimizer.zero_grad()
+                # forward + backward + optimize
+                outputs = self.model(x)
+                loss = self.model.loss(outputs, y)
+                epoch_loss += loss.item()
+                loss.backward()
+                optimizer.step()
+            print(f'Epoch={epoch_i}\t Avg. Loss per epoch: {epoch_loss / num_data_point:.3f}')
+        # (4) Prepare for saving.
+        self.set_model_eval_mode()
+        print('Eval starts...')
+        # (5) Eval the model on the training data to quantify the improvement.
+        last_avg_loss_per_triple = 0
+        for x, y in train_dataloader:
+            pred = self.model(x)
+            last_avg_loss_per_triple += self.model.loss(pred, y)
+        last_avg_loss_per_triple /= len(train_set)
+        print(f'On average Improvement: {first_avg_loss_per_triple - last_avg_loss_per_triple:.3f}')
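A retraining sketch; the KG import path, its dataset_dir parameter, and the dataset location are assumptions:

from dicee.knowledge_graph import KG  # assumed import path

kg = KG(dataset_dir='KGs/UMLS')  # placeholder dataset directory
pre_trained_kge.train(kg, lr=0.01, epoch=5, batch_size=128, neg_sample_ratio=10)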
\ No newline at end of file
diff --git a/_modules/dicee/models/base_model.html b/_modules/dicee/models/base_model.html
new file mode 100644
index 00000000..820d13f0
--- /dev/null
+++ b/_modules/dicee/models/base_model.html
@@ -0,0 +1,604 @@
+dicee.models.base_model - DICE Embeddings 0.0.6 documentation

Source code for dicee.models.base_model

+from typing import List, Any, Tuple, Union, Dict
+import pytorch_lightning
+import numpy as np
+import torch
+
+
+
+[docs]
+class BaseKGE(pytorch_lightning.LightningModule):
+    def __init__(self, args: dict):
+        super().__init__()
+        self.args = args
+        self.embedding_dim = None
+        self.num_entities = None
+        self.num_relations = None
+        self.learning_rate = None
+        self.apply_unit_norm = None
+        self.input_dropout_rate = None
+        self.hidden_dropout_rate = None
+        self.optimizer_name = None
+        self.feature_map_dropout_rate = None
+        self.kernel_size = None
+        self.num_of_output_channels = None
+        self.weight_decay = None
+        self.loss = torch.nn.BCEWithLogitsLoss()
+        self.selected_optimizer = None
+        self.normalizer_class = None
+        self.normalize_head_entity_embeddings = IdentityClass()
+        self.normalize_relation_embeddings = IdentityClass()
+        self.normalize_tail_entity_embeddings = IdentityClass()
+        self.hidden_normalizer = IdentityClass()
+        self.param_init = None
+        self.init_params_with_sanity_checking()
+
+        # Dropouts
+        self.input_dp_ent_real = torch.nn.Dropout(self.input_dropout_rate)
+        self.input_dp_rel_real = torch.nn.Dropout(self.input_dropout_rate)
+        self.hidden_dropout = torch.nn.Dropout(self.hidden_dropout_rate)
+        # average minibatch loss per epoch
+        self.loss_history = []
+[docs]
+    def mem_of_model(self) -> Dict:
+        """ Size of model in MB and number of params """
+        # https://discuss.pytorch.org/t/finding-model-size/130275/2
+        # (1) Count parameters and their size in bytes.
+        num_params = sum(p.numel() for p in self.parameters())
+        param_size = sum(p.numel() * p.element_size() for p in self.parameters())
+        # (2) Add the size of registered buffers in bytes.
+        buffer_size = sum(buffer.nelement() * buffer.element_size() for buffer in self.buffers())
+        return {'EstimatedSizeMB': (param_size + buffer_size) / 1024 ** 2, 'NumParam': num_params}
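The same byte-level estimate can be reproduced on any torch module; a minimal sketch:

import torch

layer = torch.nn.Linear(100, 10)  # 1010 float32 parameters (1000 weights + 10 biases)
num_params = sum(p.numel() for p in layer.parameters())
param_size = sum(p.numel() * p.element_size() for p in layer.parameters())
print(num_params, param_size / 1024 ** 2)  # 1010 params, ~0.0039 MB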
+ + +
+[docs]
+    def init_params_with_sanity_checking(self):
+        if self.args.get('weight_decay'):
+            self.weight_decay = self.args['weight_decay']
+        else:
+            self.weight_decay = 0.0
+        if self.args.get('embedding_dim'):
+            self.embedding_dim = self.args['embedding_dim']
+        else:
+            self.embedding_dim = 1
+
+        if self.args.get('num_entities'):
+            self.num_entities = self.args['num_entities']
+        else:
+            self.num_entities = 1
+
+        if self.args.get('num_relations'):
+            self.num_relations = self.args['num_relations']
+        else:
+            self.num_relations = 1
+
+        if self.args.get('learning_rate'):
+            self.learning_rate = self.args['learning_rate']
+        else:
+            self.learning_rate = .1
+
+        if self.args.get("input_dropout_rate"):
+            self.input_dropout_rate = self.args['input_dropout_rate']
+        else:
+            self.input_dropout_rate = 0.0
+        if self.args.get("hidden_dropout_rate"):
+            self.hidden_dropout_rate = self.args['hidden_dropout_rate']
+        else:
+            self.hidden_dropout_rate = 0.0
+        if self.args.get("model") in ['ConvQ', 'ConvO', 'ConEx', 'AConEx', 'AConvQ', 'AConvO']:
+            if self.args.get("kernel_size"):
+                self.kernel_size = self.args['kernel_size']
+            else:
+                self.kernel_size = 3
+            if self.args.get("num_of_output_channels"):
+                self.num_of_output_channels = self.args['num_of_output_channels']
+            else:
+                self.num_of_output_channels = 3
+            if self.args.get("feature_map_dropout_rate"):
+                self.feature_map_dropout_rate = self.args['feature_map_dropout_rate']
+            else:
+                self.feature_map_dropout_rate = 0.0
+
+        if self.args.get("normalization") == 'LayerNorm':
+            self.normalizer_class = torch.nn.LayerNorm
+            self.normalize_head_entity_embeddings = self.normalizer_class(self.embedding_dim)
+            self.normalize_relation_embeddings = self.normalizer_class(self.embedding_dim)
+            if self.args['scoring_technique'] in ['NegSample', 'KvsSample']:
+                self.normalize_tail_entity_embeddings = self.normalizer_class(self.embedding_dim)
+        elif self.args.get("normalization") == 'BatchNorm1d':
+            self.normalizer_class = torch.nn.BatchNorm1d
+            self.normalize_head_entity_embeddings = self.normalizer_class(self.embedding_dim, affine=False)
+            self.normalize_relation_embeddings = self.normalizer_class(self.embedding_dim, affine=False)
+            if self.args['scoring_technique'] in ['NegSample', 'KvsSample']:
+                self.normalize_tail_entity_embeddings = self.normalizer_class(self.embedding_dim, affine=False)
+        elif self.args.get("normalization") is None:
+            self.normalizer_class = IdentityClass
+        else:
+            raise NotImplementedError()
+        if self.args.get("optim") in ['NAdam', 'Adam', 'SGD']:
+            self.optimizer_name = self.args['optim']
+        else:
+            print(f'--optim (***{self.args.get("optim")}***) not found; defaulting to Adam.')
+            self.optimizer_name = 'Adam'
+
+        if self.args.get("init_param") is None:
+            self.param_init = IdentityClass
+        elif self.args['init_param'] == 'xavier_normal':
+            self.param_init = torch.nn.init.xavier_normal_
+        else:
+            print(f'--init_param (***{self.args.get("init_param")}***) not found; defaulting to identity init.')
+            self.param_init = IdentityClass
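For instance, a minimal args dict that passes these checks (all values illustrative):

args = {'embedding_dim': 32, 'num_entities': 100, 'num_relations': 10,
        'learning_rate': 0.01, 'optim': 'Adam',
        'normalization': None, 'init_param': 'xavier_normal'}
# Any missing key silently falls back to the defaults shown above,
# e.g. embedding_dim=1 or learning_rate=0.1.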
+ + +
+[docs]
+    def get_embeddings(self) -> Tuple[np.ndarray, np.ndarray]:
+        return self.entity_embeddings.weight.data.detach(), self.relation_embeddings.weight.data.detach()
+ + +
+[docs] + def configure_optimizers(self, parameters=None): + if parameters is None: + parameters = self.parameters() + + # default params in pytorch. + if self.optimizer_name == 'SGD': + self.selected_optimizer = torch.optim.SGD(params=parameters, lr=self.learning_rate, + momentum=0, dampening=0, weight_decay=self.weight_decay, + nesterov=False) + elif self.optimizer_name == 'Adam': + self.selected_optimizer = torch.optim.Adam(parameters, lr=self.learning_rate, + weight_decay=self.weight_decay) + + elif self.optimizer_name == 'NAdam': + self.selected_optimizer = torch.optim.NAdam(parameters, lr=self.learning_rate, betas=(0.9, 0.999), + eps=1e-08, weight_decay=self.weight_decay, momentum_decay=0.004) + elif self.optimizer_name == 'Adagrad': + self.selected_optimizer = torch.optim.Adagrad(parameters, + lr=self.learning_rate, eps=1e-10, + weight_decay=self.weight_decay) + elif self.optimizer_name == 'ASGD': + self.selected_optimizer = torch.optim.ASGD(parameters, + lr=self.learning_rate, lambd=0.0001, alpha=0.75, + weight_decay=self.weight_decay) + else: + raise KeyError() + return self.selected_optimizer
+ + +
+[docs] + def loss_function(self, yhat_batch, y_batch): + return self.loss(yhat_batch, y_batch)
+ + +
+[docs] + def forward_triples(self, *args, **kwargs): + raise ValueError(f'MODEL:{self.name} does not have forward_triples function')
+ + +
+[docs] + def forward_k_vs_all(self, *args, **kwargs): + raise ValueError(f'MODEL:{self.name} does not have forward_k_vs_all function')
+ + +
+[docs] + def forward_k_vs_sample(self, *args, **kwargs): + raise ValueError(f'MODEL:{self.name} does not have forward_k_vs_sample function')
+ + +
+[docs] + def forward(self, x: Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]], + y_idx: torch.LongTensor = None): + """ + + :param x: a batch of inputs + :param y_idx: indices of selected outputs. + :return: + """ + if isinstance(x, tuple): + x, y_idx = x + return self.forward_k_vs_sample(x=x, target_entity_idx=y_idx) + else: + batch_size, dim = x.shape + if dim == 3: + return self.forward_triples(x) + elif dim == 2: + # h, y = x[0], x[1] + # Note that y can be relation or tail entity. + return self.forward_k_vs_all(x=x) + else: + return self.forward_sequence(x=x)
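The dispatch above is driven purely by the input shape; a sketch for any instantiated BaseKGE subclass that implements the corresponding forward_* methods (model is assumed to be such an instance):

import torch

triples = torch.LongTensor([[0, 0, 1], [2, 1, 3]])     # (n, 3) -> forward_triples
pairs = torch.LongTensor([[0, 0], [2, 1]])             # (n, 2) -> forward_k_vs_all
sampled = (pairs, torch.LongTensor([[1, 3], [0, 2]]))  # tuple  -> forward_k_vs_sample
scores = model(triples)       # shape (n,)
all_scores = model(pairs)     # shape (n, num_entities)
some_scores = model(sampled)  # shape (n, 2)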
+ + + """ + def training_step(self, batch, batch_idx): + if len(batch) == 2: + x_batch, y_batch = batch + yhat_batch = self.forward(x_batch) + elif len(batch) == 3: + x_batch, y_idx_batch, y_batch, = batch + yhat_batch = self.forward(x_batch, y_idx_batch) + else: + print(len(batch)) + raise ValueError('Unexpected batch shape..') + train_loss = self.loss_function(yhat_batch=yhat_batch, y_batch=y_batch) + return train_loss + """ + +
+[docs] + def training_step(self, batch, batch_idx=None): + x_batch, y_batch = batch + yhat_batch = self.forward(x_batch) + loss_batch = self.loss_function(yhat_batch, y_batch) + return loss_batch
+ + +
+[docs] + def training_epoch_end(self, training_step_outputs): + batch_losses = [i['loss'].item() for i in training_step_outputs] + avg = sum(batch_losses) / len(batch_losses) + self.loss_history.append(avg)
+ + +
+[docs] + def validation_step(self, batch, batch_idx): + """ + @ TODO + # from torchmetrics import Accuracy as accuracy + if len(batch) == 4: + h, r, t, y_batch = batch + predictions = self.forward_triples(h, r, t) + else: + h, x, y_batch = batch[:, 0], batch[:, 1], batch[:, 2] + predictions = self.forward_k_vs_all(h, x) + + val_loss = self.loss_function(predictions, y_batch) + val_accuracy = accuracy(predictions, y_batch) + return {'val_acc': val_accuracy, 'val_loss': val_loss} + """
+ + +
+[docs] + def validation_epoch_end(self, outputs: List[Any]) -> None: + """ + @ TODO + + x = [[x['val_acc'], x['val_loss']] for x in outputs] + avg_val_acc, avg_loss = torch.tensor(x).mean(dim=0)[:] + self.log('avg_loss_per_epoch', avg_loss, on_epoch=True, prog_bar=True) + self.log('avg_val_acc_per_epoch', avg_val_acc, on_epoch=True, prog_bar=True) + """
+ + +
+[docs] + def test_step(self, batch, batch_idx): + """ + @ TODO + + if len(batch) == 4: + h, r, t, y_batch = batch + predictions = self.forward_triples(h, r, t) + else: + h, x, y_batch = batch[:, 0], batch[:, 1], batch[:, 2] + predictions = self.forward_k_vs_all(h, x) + test_accuracy = accuracy(predictions, y_batch) + return {'test_accuracy': test_accuracy} + """
+ + +
+[docs] + def test_epoch_end(self, outputs: List[Any]): + """ + @ TODO + avg_test_accuracy = torch.stack([x['test_accuracy'] for x in outputs]).mean() + self.log('avg_test_accuracy', avg_test_accuracy, on_epoch=True, prog_bar=True) + """
+ + +
+[docs] + def test_dataloader(self) -> None: + pass
+ + +
+[docs] + def val_dataloader(self) -> None: + pass
+ + +
+[docs] + def predict_dataloader(self) -> None: + pass
+ + +
+[docs] + def train_dataloader(self) -> None: + pass
+ + +
+[docs] + def get_triple_representation(self, idx_hrt): + # (1) Split input into indexes. + idx_head_entity, idx_relation, idx_tail_entity = idx_hrt[:, 0], idx_hrt[:, 1], idx_hrt[:, 2] + # (2) Retrieve embeddings & Apply Dropout & Normalization + head_ent_emb = self.normalize_head_entity_embeddings( + self.input_dp_ent_real(self.entity_embeddings(idx_head_entity))) + rel_ent_emb = self.normalize_relation_embeddings(self.input_dp_rel_real(self.relation_embeddings(idx_relation))) + tail_ent_emb = self.normalize_tail_entity_embeddings(self.entity_embeddings(idx_tail_entity)) + return head_ent_emb, rel_ent_emb, tail_ent_emb
+ + +
+[docs] + def get_head_relation_representation(self, indexed_triple): + # (1) Split input into indexes. + idx_head_entity, idx_relation = indexed_triple[:, 0], indexed_triple[:, 1] + # (2) Retrieve embeddings & Apply Dropout & Normalization + head_ent_emb = self.normalize_head_entity_embeddings( + self.input_dp_ent_real(self.entity_embeddings(idx_head_entity))) + rel_ent_emb = self.normalize_relation_embeddings(self.input_dp_rel_real(self.relation_embeddings(idx_relation))) + return head_ent_emb, rel_ent_emb
+
+ + + +
+[docs] +class IdentityClass(torch.nn.Module): + def __init__(self, args=None): + super().__init__() + self.args = args + +
+[docs] + @staticmethod + def forward(x): + return x
+
\ No newline at end of file
diff --git a/_modules/dicee/models/clifford.html b/_modules/dicee/models/clifford.html
new file mode 100644
index 00000000..f7e852a0
--- /dev/null
+++ b/_modules/dicee/models/clifford.html
@@ -0,0 +1,1014 @@
+dicee.models.clifford - DICE Embeddings 0.0.6 documentation

Source code for dicee.models.clifford

+from .base_model import BaseKGE
+import torch
+
+
+
+[docs]
+class CMult(BaseKGE):
+    """
+    Cl_(0,0) => Real Numbers
+
+    Cl_(0,1) =>
+    A multivector \mathbf{a} = a_0 + a_1 e_1
+    A multivector \mathbf{b} = b_0 + b_1 e_1
+
+    Multiplication is isomorphic to the product of two complex numbers:
+
+    \mathbf{a} \times \mathbf{b} = a_0 b_0 + a_0 b_1 e_1 + a_1 b_0 e_1 + a_1 b_1 e_1 e_1
+                                 = (a_0 b_0 - a_1 b_1) + (a_0 b_1 + a_1 b_0) e_1
+    Cl_(2,0) =>
+    A multivector \mathbf{a} = a_0 + a_1 e_1 + a_2 e_2 + a_{12} e_1 e_2
+    A multivector \mathbf{b} = b_0 + b_1 e_1 + b_2 e_2 + b_{12} e_1 e_2
+
+    \mathbf{a} \times \mathbf{b} = a_0 b_0 + a_0 b_1 e_1 + a_0 b_2 e_2 + a_0 b_{12} e_1 e_2
+                                 + a_1 b_0 e_1 + a_1 b_1 e_1 e_1 + ...
+
+    Cl_(0,2) => Quaternions
+    """
+
+    def __init__(self, args):
+        super().__init__(args)
+        self.name = 'CMult'
+        self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim)
+        self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim)
+        self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data)
+        self.p = self.args['p']
+        self.q = self.args['q']
+        if self.p is None:
+            self.p = 0
+        if self.q is None:
+            self.q = 0
+        print(f'\tp:{self.p}\tq:{self.q}')
+[docs]
+    def clifford_mul(self, x: torch.FloatTensor, y: torch.FloatTensor, p: int, q: int) -> tuple:
+        """
+        Clifford multiplication in Cl_{p,q}(\mathbb{R})
+
+        e_i^2 = +1 for 1 <= i <= p
+        e_j^2 = -1 for p < j <= p+q
+        e_i e_j = -e_j e_i for i \neq j
+
+        Parameter
+        ---------
+        x: torch.FloatTensor with (n,d) shape
+
+        y: torch.FloatTensor with (n,d) shape
+
+        p: a non-negative integer p >= 0
+        q: a non-negative integer q >= 0
+
+        Returns
+        -------
+
+        """
+
+        if p == q == 0:
+            return x * y
+        elif (p == 1 and q == 0) or (p == 0 and q == 1):
+            # {1, e_1} with e_1^2 = +1 (p=1) or e_1^2 = -1 (q=1)
+            a0, a1 = torch.hsplit(x, 2)
+            b0, b1 = torch.hsplit(y, 2)
+            if p == 1 and q == 0:
+                ab0 = a0 * b0 + a1 * b1
+                ab1 = a0 * b1 + a1 * b0
+            else:
+                ab0 = a0 * b0 - a1 * b1
+                ab1 = a0 * b1 + a1 * b0
+            return ab0, ab1
+        elif (p == 2 and q == 0) or (p == 0 and q == 2):
+            a0, a1, a2, a12 = torch.hsplit(x, 4)
+            b0, b1, b2, b12 = torch.hsplit(y, 4)
+            if p == 2 and q == 0:
+                ab0 = a0 * b0 + a1 * b1 + a2 * b2 - a12 * b12
+                ab1 = a0 * b1 + a1 * b0 - a2 * b12 + a12 * b2
+                ab2 = a0 * b2 + a1 * b12 + a2 * b0 - a12 * b1
+                ab12 = a0 * b12 + a1 * b2 - a2 * b1 + a12 * b0
+            else:
+                ab0 = a0 * b0 - a1 * b1 - a2 * b2 - a12 * b12
+                ab1 = a0 * b1 + a1 * b0 + a2 * b12 - a12 * b2
+                ab2 = a0 * b2 - a1 * b12 + a2 * b0 + a12 * b1
+                ab12 = a0 * b12 + a1 * b2 - a2 * b1 + a12 * b0
+            return ab0, ab1, ab2, ab12
+        elif p == 1 and q == 1:
+            a0, a1, a2, a12 = torch.hsplit(x, 4)
+            b0, b1, b2, b12 = torch.hsplit(y, 4)
+
+            ab0 = a0 * b0 + a1 * b1 - a2 * b2 + a12 * b12
+            ab1 = a0 * b1 + a1 * b0 + a2 * b12 - a12 * b2
+            ab2 = a0 * b2 + a1 * b12 + a2 * b0 - a12 * b1
+            ab12 = a0 * b12 + a1 * b2 - a2 * b1 + a12 * b0
+            return ab0, ab1, ab2, ab12
+        elif p == 3 and q == 0:
+            # Cl_{3,0}; Cl_{0,3} is not implemented.
+            a0, a1, a2, a3, a12, a13, a23, a123 = torch.hsplit(x, 8)
+            b0, b1, b2, b3, b12, b13, b23, b123 = torch.hsplit(y, 8)
+
+            ab0 = a0 * b0 + a1 * b1 + a2 * b2 + a3 * b3 - a12 * b12 - a13 * b13 - a23 * b23 - a123 * b123
+            ab1 = a0 * b1 + a1 * b0 - a2 * b12 - a3 * b13 + a12 * b2 + a13 * b3 - a23 * b123 - a123 * b23
+            ab2 = a0 * b2 + a1 * b12 + a2 * b0 - a3 * b23 - a12 * b1 + a13 * b123 + a23 * b3 + a123 * b13
+            ab3 = a0 * b3 + a1 * b13 + a2 * b23 + a3 * b0 - a12 * b123 - a13 * b1 - a23 * b2 - a123 * b12
+            ab12 = a0 * b12 + a1 * b2 - a2 * b1 + a3 * b123 + a12 * b0 - a13 * b23 + a23 * b13 + a123 * b3
+            ab13 = a0 * b13 + a1 * b3 - a2 * b123 - a3 * b1 + a12 * b23 + a13 * b0 - a23 * b12 - a123 * b2
+            ab23 = a0 * b23 + a1 * b123 + a2 * b3 - a3 * b2 - a12 * b13 - a13 * b12 + a23 * b0 + a123 * b1
+            ab123 = a0 * b123 + a1 * b23 - a2 * b13 + a3 * b12 + a12 * b3 - a13 * b2 + a23 * b1 + a123 * b0
+            return ab0, ab1, ab2, ab3, ab12, ab13, ab23, ab123
+        else:
+            raise NotImplementedError
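As a quick check of the Cl_{0,1} branch, its output coincides with ordinary complex multiplication; a standalone sketch:

import torch

# Cl_{0,1} multiplication (e_1^2 = -1) is isomorphic to complex multiplication.
x, y = torch.randn(4, 2), torch.randn(4, 2)
a0, a1 = torch.hsplit(x, 2)
b0, b1 = torch.hsplit(y, 2)
ab0 = a0 * b0 - a1 * b1
ab1 = a0 * b1 + a1 * b0
z = torch.complex(a0, a1) * torch.complex(b0, b1)
assert torch.allclose(ab0, z.real) and torch.allclose(ab1, z.imag)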
+ + +
+[docs] + def score(self, head_ent_emb, rel_ent_emb, tail_ent_emb): + ab = self.clifford_mul(x=head_ent_emb, y=rel_ent_emb, p=self.p, q=self.q) + + if self.p == self.q == 0: + return torch.einsum('bd,bd->b', ab, tail_ent_emb) + elif (self.p == 1 and self.q == 0) or (self.p == 0 and self.q == 1): + ab0, ab1 = ab + c0, c1 = torch.hsplit(tail_ent_emb, 2) + return torch.einsum('bd,bd->b', ab0, c0) + torch.einsum('bd,bd->b', ab1, c1) + elif (self.p == 2 and self.q == 0) or (self.p == 0 and self.q == 2): + ab0, ab1, ab2, ab12 = ab + c0, c1, c2, c12 = torch.hsplit(tail_ent_emb, 4) + return torch.einsum('bd,bd->b', ab0, c0) \ + + torch.einsum('bd,bd->b', ab1, c1) \ + + torch.einsum('bd,bd->b', ab2, c2) \ + + torch.einsum('bd,bd->b', ab12, c12) + else: + raise NotImplementedError
+ + +
+[docs]
+    def forward_triples(self, x: torch.LongTensor) -> torch.FloatTensor:
+        """
+        Compute batch triple scores
+
+        Parameter
+        ---------
+        x: torch.LongTensor with shape n by 3
+
+        Returns
+        -------
+        torch.FloatTensor with shape n
+
+        """
+
+        # (1) Retrieve real-valued embedding vectors.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) CL multiply the head and relation embeddings.
+        ab = self.clifford_mul(x=head_ent_emb, y=rel_ent_emb, p=self.p, q=self.q)
+        # (3) Inner product of (2) and the tail embeddings.
+        if self.p == self.q == 0:
+            return torch.einsum('bd,bd->b', ab, tail_ent_emb)
+        elif (self.p == 1 and self.q == 0) or (self.p == 0 and self.q == 1):
+            ab0, ab1 = ab
+            c0, c1 = torch.hsplit(tail_ent_emb, 2)
+            return torch.einsum('bd,bd->b', ab0, c0) + torch.einsum('bd,bd->b', ab1, c1)
+        elif (self.p == 2 and self.q == 0) or (self.p == 0 and self.q == 2):
+            ab0, ab1, ab2, ab12 = ab
+            c0, c1, c2, c12 = torch.hsplit(tail_ent_emb, 4)
+            return torch.einsum('bd,bd->b', ab0, c0) \
+                + torch.einsum('bd,bd->b', ab1, c1) \
+                + torch.einsum('bd,bd->b', ab2, c2) \
+                + torch.einsum('bd,bd->b', ab12, c12)
+        else:
+            raise NotImplementedError
+ + +
+[docs]
+    def forward_k_vs_all(self, x: torch.Tensor) -> torch.FloatTensor:
+        """
+        Compute batch KvsAll triple scores
+
+        Parameter
+        ---------
+        x: torch.LongTensor with shape n by 2
+
+        Returns
+        -------
+        torch.FloatTensor with shape n by |E|
+
+        """
+        # (1) Retrieve embedding vectors of heads and relations.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) CL multiply (1).
+        ab = self.clifford_mul(x=head_ent_emb, y=rel_ent_emb, p=self.p, q=self.q)
+        # (3) Inner product of (2) and all entity embeddings.
+        if self.p == self.q == 0:
+            return torch.mm(ab, self.entity_embeddings.weight.transpose(1, 0))
+        elif (self.p == 1 and self.q == 0) or (self.p == 0 and self.q == 1):
+            ab0, ab1 = ab
+            c0, c1 = torch.hsplit(self.entity_embeddings.weight, 2)
+            return torch.mm(ab0, c0.transpose(1, 0)) + torch.mm(ab1, c1.transpose(1, 0))
+        elif (self.p == 2 and self.q == 0) or (self.p == 0 and self.q == 2):
+            ab0, ab1, ab2, ab12 = ab
+            c0, c1, c2, c12 = torch.hsplit(self.entity_embeddings.weight, 4)
+            return torch.mm(ab0, c0.transpose(1, 0)) \
+                + torch.mm(ab1, c1.transpose(1, 0)) + torch.mm(ab2, c2.transpose(1, 0)) \
+                + torch.mm(ab12, c12.transpose(1, 0))
+        elif self.p == 3 and self.q == 0:
+
+            ab0, ab1, ab2, ab3, ab12, ab13, ab23, ab123 = ab
+            c0, c1, c2, c3, c12, c13, c23, c123 = torch.hsplit(self.entity_embeddings.weight, 8)
+
+            return torch.mm(ab0, c0.transpose(1, 0)) \
+                + torch.mm(ab1, c1.transpose(1, 0)) \
+                + torch.mm(ab2, c2.transpose(1, 0)) \
+                + torch.mm(ab3, c3.transpose(1, 0)) \
+                + torch.mm(ab12, c12.transpose(1, 0)) + torch.mm(ab13, c13.transpose(1, 0)) \
+                + torch.mm(ab23, c23.transpose(1, 0)) + torch.mm(ab123, c123.transpose(1, 0))
+        elif self.p == 1 and self.q == 1:
+            ab0, ab1, ab2, ab12 = ab
+            c0, c1, c2, c12 = torch.hsplit(self.entity_embeddings.weight, 4)
+            return torch.mm(ab0, c0.transpose(1, 0)) + torch.mm(ab1, c1.transpose(1, 0)) \
+                + torch.mm(ab2, c2.transpose(1, 0)) + torch.mm(ab12, c12.transpose(1, 0))
+
+        else:
+            raise NotImplementedError
+
+ + + +
+[docs]
+class Keci(BaseKGE):
+    def __init__(self, args):
+        super().__init__(args)
+        self.name = 'Keci'
+        self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim)
+        self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim)
+        self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data)
+
+        self.p = self.args.get("p", 0)
+        self.q = self.args.get("q", 0)
+        if self.p is None:
+            self.p = 0
+        if self.q is None:
+            self.q = 0
+        self.r = self.embedding_dim / (self.p + self.q + 1)
+        try:
+            assert self.r.is_integer()
+        except AssertionError:
+            raise AssertionError(f'r = embedding_dim / (p + q + 1) must be a whole number\n'
+                                 f'Currently {self.r} = {self.embedding_dim} / ({self.p} + {self.q} + 1)')
+        self.r = int(self.r)
+        self.requires_grad_for_interactions = True
+        print(f'r:{self.r}\t p:{self.p}\t q:{self.q}')
+        # Initialize scalar coefficients for the p and q base vectors (dimension scaling).
+        if self.p > 0:
+            self.p_coefficients = torch.nn.Embedding(num_embeddings=1, embedding_dim=self.p)
+            torch.nn.init.zeros_(self.p_coefficients.weight)
+        if self.q > 0:
+            self.q_coefficients = torch.nn.Embedding(num_embeddings=1, embedding_dim=self.q)
+            torch.nn.init.zeros_(self.q_coefficients.weight)
+[docs] + def compute_sigma_pp(self, hp, rp): + """ + Compute sigma_{pp} = \sum_{i=1}^{p-1} \sum_{k=i+1}^p (h_i r_k - h_k r_i) e_i e_k + + sigma_{pp} captures the interactions between along p bases + For instance, let p e_1, e_2, e_3, we compute interactions between e_1 e_2, e_1 e_3 , and e_2 e_3 + This can be implemented with a nested two for loops + + results = [] + for i in range(p - 1): + for k in range(i + 1, p): + results.append(hp[:, :, i] * rp[:, :, k] - hp[:, :, k] * rp[:, :, i]) + sigma_pp = torch.stack(results, dim=2) + assert sigma_pp.shape == (b, r, int((p * (p - 1)) / 2)) + + Yet, this computation would be quite inefficient. Instead, we compute interactions along all p, + e.g., e1e1, e1e2, e1e3, + e2e1, e2e2, e2e3, + e3e1, e3e2, e3e3 + Then select the triangular matrix without diagonals: e1e2, e1e3, e2e3. + """ + # Compute indexes for the upper triangle of p by p matrix + indices = torch.triu_indices(self.p, self.p, offset=1) + # Compute p by p operations + sigma_pp = torch.einsum('nrp,nrx->nrpx', hp, rp) - torch.einsum('nrx,nrp->nrpx', hp, rp) + sigma_pp = sigma_pp[:, :, indices[0], indices[1]] + return sigma_pp
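The einsum shortcut can be verified against the nested-loop reference from the docstring; a standalone sketch:

import torch

n, r, p = 2, 3, 4
hp, rp = torch.randn(n, r, p), torch.randn(n, r, p)
# Vectorised path, as in compute_sigma_pp: all pairwise products, then the strict upper triangle.
idx = torch.triu_indices(p, p, offset=1)
fast = (torch.einsum('nri,nrk->nrik', hp, rp)
        - torch.einsum('nrk,nri->nrik', hp, rp))[:, :, idx[0], idx[1]]
# Naive nested-loop reference from the docstring.
slow = torch.stack([hp[:, :, i] * rp[:, :, k] - hp[:, :, k] * rp[:, :, i]
                    for i in range(p - 1) for k in range(i + 1, p)], dim=2)
assert fast.shape == (n, r, p * (p - 1) // 2) and torch.allclose(fast, slow)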
+ + +
+[docs] + def compute_sigma_qq(self, hq, rq): + """ + Compute sigma_{qq} = \sum_{j=1}^{p+q-1} \sum_{k=j+1}^{p+q} (h_j r_k - h_k r_j) e_j e_k + sigma_{q} captures the interactions between along q bases + For instance, let q e_1, e_2, e_3, we compute interactions between e_1 e_2, e_1 e_3 , and e_2 e_3 + This can be implemented with a nested two for loops + + results = [] + for j in range(q - 1): + for k in range(j + 1, q): + results.append(hq[:, :, j] * rq[:, :, k] - hq[:, :, k] * rq[:, :, j]) + sigma_qq = torch.stack(results, dim=2) + assert sigma_qq.shape == (b, r, int((q * (q - 1)) / 2)) + + Yet, this computation would be quite inefficient. Instead, we compute interactions along all p, + e.g., e1e1, e1e2, e1e3, + e2e1, e2e2, e2e3, + e3e1, e3e2, e3e3 + Then select the triangular matrix without diagonals: e1e2, e1e3, e2e3. + """ + # Compute indexes for the upper triangle of p by p matrix + if self.q > 1: + indices = torch.triu_indices(self.q, self.q, offset=1) + # Compute p by p operations + sigma_qq = torch.einsum('nrp,nrx->nrpx', hq, rq) - torch.einsum('nrx,nrp->nrpx', hq, rq) + sigma_qq = sigma_qq[:, :, indices[0], indices[1]] + else: + sigma_qq = torch.zeros((len(hq), self.r, int((self.q * (self.q - 1)) / 2))) + + return sigma_qq
+ + +
+[docs] + def compute_sigma_pq(self, *, hp, hq, rp, rq): + """ + \sum_{i=1}^{p} \sum_{j=p+1}^{p+q} (h_i r_j - h_j r_i) e_i e_j + + results = [] + sigma_pq = torch.zeros(b, r, p, q) + for i in range(p): + for j in range(q): + sigma_pq[:, :, i, j] = hp[:, :, i] * rq[:, :, j] - hq[:, :, j] * rp[:, :, i] + print(sigma_pq.shape) + + """ + sigma_pq = torch.einsum('nrp,nrq->nrpq', hp, rq) - torch.einsum('nrq,nrp->nrpq', hq, rp) + assert sigma_pq.shape[1:] == (self.r, self.p, self.q) + return sigma_pq
+ + +
+[docs]
+    def clifford_multiplication(self, h0, hp, hq, r0, rp, rq):
+        """ Compute our CL multiplication
+
+        h = h_0 + \sum_{i=1}^p h_i e_i + \sum_{j=p+1}^{p+q} h_j e_j
+        r = r_0 + \sum_{i=1}^p r_i e_i + \sum_{j=p+1}^{p+q} r_j e_j
+
+        e_i^2 = +1 for 1 <= i <= p
+        e_j^2 = -1 for p < j <= p+q
+        e_i e_j = -e_j e_i for i \neq j
+
+        h r = sigma_0 + sigma_p + sigma_q + sigma_{pp} + sigma_{qq} + sigma_{pq}
+        where
+        (1) sigma_0 = h_0 r_0 + \sum_{i=1}^p h_i r_i - \sum_{j=p+1}^{p+q} h_j r_j
+
+        (2) sigma_p = \sum_{i=1}^p (h_0 r_i + h_i r_0) e_i
+
+        (3) sigma_q = \sum_{j=p+1}^{p+q} (h_0 r_j + h_j r_0) e_j
+
+        (4) sigma_{pp} = \sum_{i=1}^{p-1} \sum_{k=i+1}^p (h_i r_k - h_k r_i) e_i e_k
+
+        (5) sigma_{qq} = \sum_{j=p+1}^{p+q-1} \sum_{k=j+1}^{p+q} (h_j r_k - h_k r_j) e_j e_k
+
+        (6) sigma_{pq} = \sum_{i=1}^{p} \sum_{j=p+1}^{p+q} (h_i r_j - h_j r_i) e_i e_j
+
+        """
+        n = len(h0)
+        assert h0.shape == (n, self.r) == r0.shape == (n, self.r)
+        assert hp.shape == (n, self.r, self.p) == rp.shape == (n, self.r, self.p)
+        assert hq.shape == (n, self.r, self.q) == rq.shape == (n, self.r, self.q)
+        # (1)
+        sigma_0 = h0 * r0 + torch.sum(hp * rp, dim=2) - torch.sum(hq * rq, dim=2)
+        assert sigma_0.shape == (n, self.r)
+        # (2)
+        sigma_p = torch.einsum('nr,nrp->nrp', h0, rp) + torch.einsum('nr,nrp->nrp', r0, hp)
+        assert sigma_p.shape == (n, self.r, self.p)
+        # (3)
+        sigma_q = torch.einsum('nr,nrq->nrq', h0, rq) + torch.einsum('nr,nrq->nrq', r0, hq)
+        # (4)
+        sigma_pp = self.compute_sigma_pp(hp, rp)
+        # (5)
+        sigma_qq = self.compute_sigma_qq(hq, rq)
+        # (6)
+        sigma_pq = torch.einsum('bkp,bkq->bkpq', hp, rq) - torch.einsum('bkp,bkq->bkpq', rp, hq)
+        assert sigma_pq.shape == (n, self.r, self.p, self.q)
+
+        return sigma_0, sigma_p, sigma_q, sigma_pp, sigma_qq, sigma_pq
+ + +
+[docs] + def forward_k_vs_with_explicit(self, x: torch.Tensor): + n = len(x) + # (1) Retrieve real-valued embedding vectors. + head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x) + # (2) Construct multi-vector in Cl_{p,q} (\mathbb{R}^d) for head entities and relations + h0, hp, hq = self.construct_cl_multivector(head_ent_emb, r=self.r, p=self.p, q=self.q) + r0, rp, rq = self.construct_cl_multivector(rel_ent_emb, r=self.r, p=self.p, q=self.q) + E = self.entity_embeddings.weight + + # Clifford mul. + sigma_0 = h0 * r0 + torch.sum(hp * rp, dim=2) - torch.sum(hq * rq, dim=2) + sigma_p = torch.einsum('nr,nrp->nrp', h0, rp) + torch.einsum('nrp, nr->nrp', hp, r0) + sigma_q = torch.einsum('nr,nrq->nrq', h0, rq) + torch.einsum('nrq, nr->nrq', hq, r0) + + t0 = E[:, :self.r] + + score_sigma_0 = sigma_0 @ t0.transpose(1, 0) + if self.p > 0: + tp = E[:, self.r: self.r + (self.r * self.p)].view(self.num_entities, self.r, self.p) + score_sigma_p = torch.einsum('bkp,ekp->be', sigma_p, tp) + else: + score_sigma_p = 0 + if self.q > 0: + tq = E[:, -(self.r * self.q):].view(self.num_entities, self.r, self.q) + score_sigma_q = torch.einsum('bkp,ekp->be', sigma_q, tq) + else: + score_sigma_q = 0 + + # Compute sigma_pp sigma_qq and sigma_pq + if self.p > 1: + results = [] + for i in range(self.p - 1): + for k in range(i + 1, self.p): + results.append(hp[:, :, i] * rp[:, :, k] - hp[:, :, k] * rp[:, :, i]) + sigma_pp = torch.stack(results, dim=2) + assert sigma_pp.shape == (n, self.r, int((self.p * (self.p - 1)) / 2)) + sigma_pp = torch.sum(sigma_pp, dim=[1, 2]).view(n, 1) + del results + else: + sigma_pp = 0 + + if self.q > 1: + results = [] + for j in range(self.q - 1): + for k in range(j + 1, self.q): + results.append(hq[:, :, j] * rq[:, :, k] - hq[:, :, k] * rq[:, :, j]) + sigma_qq = torch.stack(results, dim=2) + del results + assert sigma_qq.shape == (n, self.r, int((self.q * (self.q - 1)) / 2)) + sigma_qq = torch.sum(sigma_qq, dim=[1, 2]).view(n, 1) + else: + sigma_qq = 0 + + if self.p >= 1 and self.q >= 1: + sigma_pq = torch.zeros(n, self.r, self.p, self.q) + for i in range(self.p): + for j in range(self.q): + sigma_pq[:, :, i, j] = hp[:, :, i] * rq[:, :, j] - hq[:, :, j] * rp[:, :, i] + sigma_pq = torch.sum(sigma_pq, dim=[1, 2, 3]).view(n, 1) + else: + sigma_pq = 0 + + return score_sigma_0 + score_sigma_p + score_sigma_q + sigma_pp + sigma_qq + sigma_pq
+ + +
+[docs] + def apply_coefficients(self, h0, hp, hq, r0, rp, rq): + """ Multiplying a base vector with its scalar coefficient """ + if self.p > 0: + hp = hp * self.p_coefficients.weight + rp = rp * self.p_coefficients.weight + if self.q > 0: + hq = hq * self.q_coefficients.weight + rq = rq * self.q_coefficients.weight + return h0, hp, hq, r0, rp, rq
+ + +
+[docs]
+    def construct_cl_multivector(self, x: torch.FloatTensor, r: int, p: int, q: int) -> tuple[
+        torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
+        """
+        Construct a batch of multivectors Cl_{p,q}(\mathbb{R}^d)
+
+        Parameter
+        ---------
+        x: torch.FloatTensor with (n,d) shape
+
+        Returns
+        -------
+        a0: torch.FloatTensor with (n,r) shape
+        ap: torch.FloatTensor with (n,r,p) shape
+        aq: torch.FloatTensor with (n,r,q) shape
+        """
+        batch_size, d = x.shape
+        # (1) A_{n \times r}: take the first r columns.
+        a0 = x[:, :r].view(batch_size, r)
+        # (2) B_{n \times r \times p}: take the next r * p columns.
+        if p > 0:
+            ap = x[:, r: r + (r * p)].view(batch_size, r, p)
+        else:
+            ap = torch.zeros((batch_size, r, p), device=self.device)
+        if q > 0:
+            # (3) C_{n \times r \times q}: take the last r * q columns.
+            aq = x[:, -(r * q):].view(batch_size, r, q)
+        else:
+            aq = torch.zeros((batch_size, r, q), device=self.device)
+        return a0, ap, aq
+ + +
+[docs]
+    def forward_k_vs_all(self, x: torch.Tensor) -> torch.FloatTensor:
+        """
+        KvsAll training
+
+        (1) Retrieve real-valued embedding vectors for heads and relations \mathbb{R}^d .
+        (2) Construct head entity and relation embeddings according to Cl_{p,q}(\mathbb{R}^d) .
+        (3) Perform Cl multiplication
+        (4) Inner product of (3) and all entity embeddings
+
+        forward_k_vs_with_explicit and this function are intended to be identical.
+        Parameter
+        ---------
+        x: torch.LongTensor with (n,2) shape
+        Returns
+        -------
+        torch.FloatTensor with (n, |E|) shape
+        """
+        # (1) Retrieve real-valued embedding vectors.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) Construct multi-vector in Cl_{p,q} (\mathbb{R}^d) for head entities and relations
+        h0, hp, hq = self.construct_cl_multivector(head_ent_emb, r=self.r, p=self.p, q=self.q)
+        r0, rp, rq = self.construct_cl_multivector(rel_ent_emb, r=self.r, p=self.p, q=self.q)
+
+        h0, hp, hq, r0, rp, rq = self.apply_coefficients(h0, hp, hq, r0, rp, rq)
+        # (3) Extract all entity embeddings
+        E = self.entity_embeddings.weight
+        # (3.1) Extract real part
+        t0 = E[:, :self.r]
+        # (4) Compute a triple score based on interactions described by the basis 1. Eq. 20
+        h0r0t0 = torch.einsum('br,er->be', h0 * r0, t0)
+
+        # (5) Compute a triple score based on interactions described by the bases of p {e_1, ..., e_p}. Eq. 21
+        if self.p > 0:
+            tp = E[:, self.r: self.r + (self.r * self.p)].view(self.num_entities, self.r, self.p)
+            hp_rp_t0 = torch.einsum('brp, er -> be', hp * rp, t0)
+            h0_rp_tp = torch.einsum('brp, erp -> be', torch.einsum('br, brp -> brp', h0, rp), tp)
+            hp_r0_tp = torch.einsum('brp, erp -> be', torch.einsum('brp, br -> brp', hp, r0), tp)
+            score_p = hp_rp_t0 + h0_rp_tp + hp_r0_tp
+        else:
+            score_p = 0
+
+        # (6) Compute a triple score based on interactions described by the bases of q {e_{p+1}, ..., e_{p+q}}. Eq. 22
+        if self.q > 0:
+            tq = E[:, -(self.r * self.q):].view(self.num_entities, self.r, self.q)
+            h0_rq_tq = torch.einsum('brq, erq -> be', torch.einsum('br, brq -> brq', h0, rq), tq)
+            hq_r0_tq = torch.einsum('brq, erq -> be', torch.einsum('brq, br -> brq', hq, r0), tq)
+            hq_rq_t0 = torch.einsum('brq, er -> be', hq * rq, t0)
+            score_q = h0_rq_tq + hq_r0_tq - hq_rq_t0
+        else:
+            score_q = 0
+
+        if self.p >= 2:
+            sigma_pp = torch.sum(self.compute_sigma_pp(hp, rp), dim=[1, 2]).unsqueeze(-1)
+        else:
+            sigma_pp = 0
+
+        if self.q >= 2:
+            sigma_qq = torch.sum(self.compute_sigma_qq(hq, rq), dim=[1, 2]).unsqueeze(-1)
+        else:
+            sigma_qq = 0
+
+        if self.p >= 2 and self.q >= 2:
+            sigma_pq = torch.sum(self.compute_sigma_pq(hp=hp, hq=hq, rp=rp, rq=rq), dim=[1, 2, 3]).unsqueeze(-1)
+        else:
+            sigma_pq = 0
+        return h0r0t0 + score_p + score_q + sigma_pp + sigma_qq + sigma_pq
+ + +
+[docs]
+    def score(self, h, r, t):
+        # (1) Construct multi-vectors in Cl_{p,q} (\mathbb{R}^d) for head entities, relations and tail entities.
+        h0, hp, hq = self.construct_cl_multivector(h, r=self.r, p=self.p, q=self.q)
+        r0, rp, rq = self.construct_cl_multivector(r, r=self.r, p=self.p, q=self.q)
+        t0, tp, tq = self.construct_cl_multivector(t, r=self.r, p=self.p, q=self.q)
+
+        if self.q > 0:
+            self.q_coefficients = self.q_coefficients.to(h0.device, non_blocking=True)
+
+        h0, hp, hq, r0, rp, rq = self.apply_coefficients(h0, hp, hq, r0, rp, rq)
+        # (2) Compute a triple score based on interactions described by the basis 1. Eq. 20
+        h0r0t0 = torch.einsum('br, br -> b', h0 * r0, t0)
+
+        # (3) Compute a triple score based on interactions described by the bases of p {e_1, ..., e_p}. Eq. 21
+        if self.p > 0:
+            # Second term in Eq. 16
+            hp_rp_t0 = torch.einsum('brp, br -> b', hp * rp, t0)
+            # Eq. 17 (per-triple inner products, hence matching batch indices)
+            h0_rp_tp = torch.einsum('brp, brp -> b', torch.einsum('br, brp -> brp', h0, rp), tp)
+            hp_r0_tp = torch.einsum('brp, brp -> b', torch.einsum('brp, br -> brp', hp, r0), tp)
+
+            score_p = hp_rp_t0 + h0_rp_tp + hp_r0_tp
+        else:
+            score_p = 0
+
+        # (4) Compute a triple score based on interactions described by the bases of q {e_{p+1}, ..., e_{p+q}}. Eq. 22
+        if self.q > 0:
+            # Third term in Eq. 16.
+            hq_rq_t0 = torch.einsum('brq, br -> b', hq * rq, t0)
+            # Eq. 18.
+            h0_rq_tq = torch.einsum('br, brq -> b', h0, rq * tq)
+            r0_hq_tq = torch.einsum('br, brq -> b', r0, hq * tq)
+            score_q = - hq_rq_t0 + (h0_rq_tq + r0_hq_tq)
+        else:
+            score_q = 0
+
+        if self.p >= 2:
+            sigma_pp = torch.sum(self.compute_sigma_pp(hp, rp), dim=[1, 2]).unsqueeze(-1)
+        else:
+            sigma_pp = 0
+
+        if self.q >= 2:
+            sigma_qq = torch.sum(self.compute_sigma_qq(hq, rq), dim=[1, 2]).unsqueeze(-1)
+        else:
+            sigma_qq = 0
+
+        if self.p >= 2 and self.q >= 2:
+            sigma_pq = torch.sum(self.compute_sigma_pq(hp=hp, hq=hq, rp=rp, rq=rq), dim=[1, 2, 3]).unsqueeze(-1)
+        else:
+            sigma_pq = 0
+        return h0r0t0 + score_p + score_q + sigma_pp + sigma_qq + sigma_pq
+ + +
+[docs]
+    def forward_triples(self, x: torch.Tensor) -> torch.FloatTensor:
+        """
+
+        Parameter
+        ---------
+        x: torch.LongTensor with (n,3) shape
+
+        Returns
+        -------
+        torch.FloatTensor with (n) shape
+        """
+        # (1) Retrieve real-valued embedding vectors.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) Construct multi-vectors in Cl_{p,q} (\mathbb{R}^d) for head entities, relations and tail entities.
+        h0, hp, hq = self.construct_cl_multivector(head_ent_emb, r=self.r, p=self.p, q=self.q)
+        r0, rp, rq = self.construct_cl_multivector(rel_ent_emb, r=self.r, p=self.p, q=self.q)
+        t0, tp, tq = self.construct_cl_multivector(tail_ent_emb, r=self.r, p=self.p, q=self.q)
+        h0, hp, hq, r0, rp, rq = self.apply_coefficients(h0, hp, hq, r0, rp, rq)
+        # (3) Compute a triple score based on interactions described by the basis 1. Eq. 20
+        h0r0t0 = torch.einsum('br, br -> b', h0 * r0, t0)
+
+        # (4) Compute a triple score based on interactions described by the bases of p {e_1, ..., e_p}. Eq. 21
+        if self.p > 0:
+            # Second term in Eq. 16
+            hp_rp_t0 = torch.einsum('brp, br -> b', hp * rp, t0)
+            # Eq. 17 (per-triple inner products, hence matching batch indices)
+            h0_rp_tp = torch.einsum('brp, brp -> b', torch.einsum('br, brp -> brp', h0, rp), tp)
+            hp_r0_tp = torch.einsum('brp, brp -> b', torch.einsum('brp, br -> brp', hp, r0), tp)
+
+            score_p = hp_rp_t0 + h0_rp_tp + hp_r0_tp
+        else:
+            score_p = 0
+
+        # (5) Compute a triple score based on interactions described by the bases of q {e_{p+1}, ..., e_{p+q}}. Eq. 22
+        if self.q > 0:
+            # Third term in Eq. 16.
+            hq_rq_t0 = torch.einsum('brq, br -> b', hq * rq, t0)
+            # Eq. 18.
+            h0_rq_tq = torch.einsum('br, brq -> b', h0, rq * tq)
+            r0_hq_tq = torch.einsum('br, brq -> b', r0, hq * tq)
+            score_q = - hq_rq_t0 + (h0_rq_tq + r0_hq_tq)
+        else:
+            score_q = 0
+
+        if self.p >= 2:
+            sigma_pp = torch.sum(self.compute_sigma_pp(hp, rp), dim=[1, 2]).unsqueeze(-1)
+        else:
+            sigma_pp = 0
+
+        if self.q >= 2:
+            sigma_qq = torch.sum(self.compute_sigma_qq(hq, rq), dim=[1, 2]).unsqueeze(-1)
+        else:
+            sigma_qq = 0
+
+        if self.p >= 2 and self.q >= 2:
+            sigma_pq = torch.sum(self.compute_sigma_pq(hp=hp, hq=hq, rp=rp, rq=rq), dim=[1, 2, 3]).unsqueeze(-1)
+        else:
+            sigma_pq = 0
+        return h0r0t0 + score_p + score_q + sigma_pp + sigma_qq + sigma_pq
+ + +
+[docs]
+    def forward_k_vs_sample(self, x: torch.LongTensor, target_entity_idx: torch.LongTensor) -> torch.FloatTensor:
+        """
+        KvsSample training
+
+        (1) Retrieve real-valued embedding vectors for heads and relations \mathbb{R}^d .
+        (2) Construct head entity and relation embeddings according to Cl_{p,q}(\mathbb{R}^d) .
+        (3) Perform Cl multiplication
+        (4) Inner product of (3) and the embeddings of the sampled target entities
+
+        Parameter
+        ---------
+        x: torch.LongTensor with (n,2) shape
+
+        target_entity_idx: torch.LongTensor with (n,k) shape
+
+        Returns
+        -------
+        torch.FloatTensor with (n,k) shape
+        """
+        # (1) Retrieve real-valued embedding vectors.
+        head_ent_emb, rel_emb = self.get_head_relation_representation(x)
+        # (2) Construct multi-vector in Cl_{p,q} (\mathbb{R}^d) for head entities.
+        a0, ap, aq = self.construct_cl_multivector(head_ent_emb, r=self.r, p=self.p, q=self.q)
+        # (3) Construct multi-vector in Cl_{p,q} (\mathbb{R}^d) for relations.
+        b0, bp, bq = self.construct_cl_multivector(rel_emb, r=self.r, p=self.p, q=self.q)
+
+        # (4) Clifford multiplication of (2) and (3).
+        AB_0, AB_p, AB_q, AB_pp, AB_qq, AB_pq = self.clifford_multiplication(a0, ap, aq, b0, bp, bq)
+
+        # (5) Retrieve the sampled tail entity embeddings: (batch, k, d).
+        tail = self.entity_embeddings(target_entity_idx)
+        n, k, d = tail.shape
+        # (6) Inner product of AB_0 and the scalar parts of the sampled entities.
+        A_score = torch.einsum('br,bkr->bk', AB_0, tail[:, :, :self.r])
+        # (7) Inner product of AB_p and the p-parts of the sampled entities.
+        if self.p > 0:
+            B_score = torch.einsum('brp,bkrp->bk', AB_p,
+                                   tail[:, :, self.r: self.r + (self.r * self.p)].view(n, k, self.r, self.p))
+        else:
+            B_score = 0
+        # (8) Inner product of AB_q and the q-parts of the sampled entities.
+        if self.q > 0:
+            C_score = torch.einsum('brq,bkrq->bk', AB_q,
+                                   tail[:, :, -(self.r * self.q):].view(n, k, self.r, self.q))
+        else:
+            C_score = 0
+        # (9) Aggregate (6, 7, 8).
+        A_B_C_score = A_score + B_score + C_score
+        # (10) Add the entity-independent cross-interaction terms AB_pp, AB_qq, AB_pq.
+        D_E_F_score = (torch.sum(AB_pp, dim=[1, 2]) + torch.sum(AB_qq, dim=[1, 2])
+                       + torch.sum(AB_pq, dim=[1, 2, 3]))
+        D_E_F_score = D_E_F_score.view(n, 1)
+        # (11) Score.
+        return A_B_C_score + D_E_F_score
+
+ + + +
+[docs] +class KeciBase(Keci): + " Without learning dimension scaling" + + def __init__(self, args): + super().__init__(args) + self.name = 'KeciBase' + self.requires_grad_for_interactions = False + print(f'r:{self.r}\t p:{self.p}\t q:{self.q}') + if self.p > 0: + self.p_coefficients = torch.nn.Embedding(num_embeddings=1, embedding_dim=self.p) + torch.nn.init.ones_(self.p_coefficients.weight) + if self.q > 0: + self.q_coefficients = torch.nn.Embedding(num_embeddings=1, embedding_dim=self.q) + torch.nn.init.ones_(self.q_coefficients.weight)
+ +
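A minimal end-to-end sketch for Keci; the args keys mirror init_params_with_sanity_checking, the values are illustrative, and embedding_dim must be divisible by p + q + 1:

import torch
from dicee.models.clifford import Keci

args = {'embedding_dim': 9, 'num_entities': 5, 'num_relations': 3,
        'p': 1, 'q': 1, 'normalization': None}  # r = 9 / (1 + 1 + 1) = 3
model = Keci(args)
x = torch.LongTensor([[0, 0, 1], [1, 2, 3]])  # (head, relation, tail) index triples
print(model.forward_triples(x).shape)         # torch.Size([2])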
\ No newline at end of file
diff --git a/_modules/dicee/models/complex.html b/_modules/dicee/models/complex.html
new file mode 100644
index 00000000..fecdcd0a
--- /dev/null
+++ b/_modules/dicee/models/complex.html
@@ -0,0 +1,529 @@
+dicee.models.complex - DICE Embeddings 0.0.6 documentation

Source code for dicee.models.complex

+from typing import Tuple
+import torch
+from .base_model import BaseKGE
+
+
+[docs] +class ConEx(BaseKGE): + """ Convolutional ComplEx Knowledge Graph Embeddings""" + + def __init__(self, args): + super().__init__(args) + self.name = 'ConEx' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + # Convolution + self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_of_output_channels, + kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True) + self.fc_num_input = self.embedding_dim * 2 * self.num_of_output_channels + self.fc1 = torch.nn.Linear(self.fc_num_input, self.embedding_dim) # Hard compression. + self.norm_fc1 = self.normalizer_class(self.embedding_dim) + + self.bn_conv2d = torch.nn.BatchNorm2d(self.num_of_output_channels) + self.feature_map_dropout = torch.nn.Dropout2d(self.feature_map_dropout_rate) + +
+[docs]
+    def residual_convolution(self, C_1: Tuple[torch.Tensor, torch.Tensor],
+                             C_2: Tuple[torch.Tensor, torch.Tensor]) -> torch.FloatTensor:
+        """
+        Compute the residual score of two complex-valued embeddings.
+        :param C_1: a tuple of two pytorch tensors that correspond to a complex-valued embedding
+        :param C_2: a tuple of two pytorch tensors that correspond to a complex-valued embedding
+        :return:
+        """
+        emb_ent_real, emb_ent_imag_i = C_1
+        emb_rel_real, emb_rel_imag_i = C_2
+        # Think of x as an image of two complex numbers.
+        x = torch.cat([emb_ent_real.view(-1, 1, 1, self.embedding_dim // 2),
+                       emb_ent_imag_i.view(-1, 1, 1, self.embedding_dim // 2),
+                       emb_rel_real.view(-1, 1, 1, self.embedding_dim // 2),
+                       emb_rel_imag_i.view(-1, 1, 1, self.embedding_dim // 2)], 2)
+
+        x = torch.nn.functional.relu(self.bn_conv2d(self.conv2d(x)))
+        x = self.feature_map_dropout(x)
+        x = x.view(x.shape[0], -1)  # reshape for the fully connected layer.
+        x = torch.nn.functional.relu(self.norm_fc1(self.fc1(x)))
+        return torch.chunk(x, 2, dim=1)
+ + +
+[docs] + def forward_k_vs_all(self, x: torch.Tensor) -> torch.FloatTensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2) + emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2) + + # (2) Apply convolution operation on (1). + C_3 = self.residual_convolution(C_1=(emb_head_real, emb_head_imag), + C_2=(emb_rel_real, emb_rel_imag)) + a, b = C_3 + emb_tail_real, emb_tail_imag = torch.hsplit(self.entity_embeddings.weight, 2) + emb_tail_real, emb_tail_imag = emb_tail_real.transpose(1, 0), emb_tail_imag.transpose(1, 0) + # (4) + real_real_real = torch.mm(a * emb_head_real * emb_rel_real, emb_tail_real) + real_imag_imag = torch.mm(a * emb_head_real * emb_rel_imag, emb_tail_imag) + imag_real_imag = torch.mm(b * emb_head_imag * emb_rel_real, emb_tail_imag) + imag_imag_real = torch.mm(b * emb_head_imag * emb_rel_imag, emb_tail_real) + return real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
+ + +
+[docs] + def forward_triples(self, x: torch.Tensor) -> torch.FloatTensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2) + emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2) + emb_tail_real, emb_tail_imag = torch.hsplit(tail_ent_emb, 2) + + # (2) Apply convolution operation on (1). + C_3 = self.residual_convolution(C_1=(emb_head_real, emb_head_imag), + C_2=(emb_rel_real, emb_rel_imag)) + a, b = C_3 + # (3) Compute hermitian inner product. + real_real_real = (a * emb_head_real * emb_rel_real * emb_tail_real).sum(dim=1) + real_imag_imag = (a * emb_head_real * emb_rel_imag * emb_tail_imag).sum(dim=1) + imag_real_imag = (b * emb_head_imag * emb_rel_real * emb_tail_imag).sum(dim=1) + imag_imag_real = (b * emb_head_imag * emb_rel_imag * emb_tail_real).sum(dim=1) + return real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
+ + +
+[docs]
+    def forward_k_vs_sample(self, x: torch.Tensor, target_entity_idx: torch.Tensor):
+        # @TODO: Double-check later.
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) Split (1) into real and imaginary parts.
+        emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2)
+        emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2)
+        # (3) Apply convolution operation on (2).
+        C_3 = self.residual_convolution(C_1=(emb_head_real, emb_head_imag),
+                                        C_2=(emb_rel_real, emb_rel_imag))
+        a, b = C_3
+
+        # (4) Retrieve the selected tail entity embeddings: (batch size, num. selected entities, dimension).
+        # tail_entity_emb = self.normalize_tail_entity_embeddings(self.entity_embeddings(target_entity_idx))
+        tail_entity_emb = self.entity_embeddings(target_entity_idx)
+        # (5) Split (4) into real and imaginary parts.
+        emb_tail_real, emb_tail_i = torch.tensor_split(tail_entity_emb, 2, dim=2)
+
+        emb_tail_real = emb_tail_real.transpose(1, 2)
+        emb_tail_i = emb_tail_i.transpose(1, 2)
+
+        real_real_real = torch.bmm((a * emb_head_real * emb_rel_real).unsqueeze(1), emb_tail_real)
+        real_imag_imag = torch.bmm((a * emb_head_real * emb_rel_imag).unsqueeze(1), emb_tail_i)
+        imag_real_imag = torch.bmm((b * emb_head_imag * emb_rel_real).unsqueeze(1), emb_tail_i)
+        imag_imag_real = torch.bmm((b * emb_head_imag * emb_rel_imag).unsqueeze(1), emb_tail_real)
+        score = real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
+        return score.squeeze(1)
+
+ + + +
+[docs] +class AConEx(BaseKGE): + """ Additive Convolutional ComplEx Knowledge Graph Embeddings """ + + def __init__(self, args): + super().__init__(args) + self.name = 'AConEx' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + # Convolution + self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_of_output_channels, + kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True) + self.fc_num_input = self.embedding_dim * 2 * self.num_of_output_channels + self.fc1 = torch.nn.Linear(self.fc_num_input, self.embedding_dim + self.embedding_dim) # Hard compression. + self.norm_fc1 = self.normalizer_class(self.embedding_dim + self.embedding_dim) + + self.bn_conv2d = torch.nn.BatchNorm2d(self.num_of_output_channels) + self.feature_map_dropout = torch.nn.Dropout2d(self.feature_map_dropout_rate) + +
+[docs]
+    def residual_convolution(self, C_1: Tuple[torch.Tensor, torch.Tensor],
+                             C_2: Tuple[torch.Tensor, torch.Tensor]) -> torch.FloatTensor:
+        """
+        Compute the residual score of two complex-valued embeddings.
+        :param C_1: a tuple of two pytorch tensors that correspond to a complex-valued embedding
+        :param C_2: a tuple of two pytorch tensors that correspond to a complex-valued embedding
+        :return:
+        """
+        emb_ent_real, emb_ent_imag_i = C_1
+        emb_rel_real, emb_rel_imag_i = C_2
+        # (N,C,H,W): a single-channel 2D image.
+        x = torch.cat([emb_ent_real.view(-1, 1, 1, self.embedding_dim // 2),
+                       emb_ent_imag_i.view(-1, 1, 1, self.embedding_dim // 2),
+                       emb_rel_real.view(-1, 1, 1, self.embedding_dim // 2),
+                       emb_rel_imag_i.view(-1, 1, 1, self.embedding_dim // 2)], 2)
+
+        x = torch.nn.functional.relu(self.bn_conv2d(self.conv2d(x)))
+        x = self.feature_map_dropout(x)
+        x = x.view(x.shape[0], -1)  # reshape for the fully connected layer.
+        x = torch.nn.functional.relu(self.norm_fc1(self.fc1(x)))
+        return torch.chunk(x, 4, dim=1)
+ + +
+[docs] + def forward_k_vs_all(self, x: torch.Tensor) -> torch.FloatTensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2) + emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2) + # (3) Apply convolution operation on (1). + C_3 = self.residual_convolution(C_1=(emb_head_real, emb_head_imag), + C_2=(emb_rel_real, emb_rel_imag)) + a, b, c, d = C_3 + # (4) Retrieve tail entity embeddings. + emb_tail_real, emb_tail_imag = torch.hsplit(self.entity_embeddings.weight, 2) + # (5) Transpose (4). + emb_tail_real, emb_tail_imag = emb_tail_real.transpose(1, 0), emb_tail_imag.transpose(1, 0) + # (6) Hermitian inner product with additive Conv2D connection. + real_real_real = torch.mm(a + emb_head_real * emb_rel_real, emb_tail_real) + real_imag_imag = torch.mm(b + emb_head_real * emb_rel_imag, emb_tail_imag) + imag_real_imag = torch.mm(c + emb_head_imag * emb_rel_real, emb_tail_imag) + imag_imag_real = torch.mm(d + emb_head_imag * emb_rel_imag, emb_tail_real) + return real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
+ + +
+[docs]
+    def forward_triples(self, x: torch.Tensor) -> torch.FloatTensor:
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) Split (1) into real and imaginary parts.
+        emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2)
+        emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2)
+        emb_tail_real, emb_tail_imag = torch.hsplit(tail_ent_emb, 2)
+        # (3) Apply convolution operation on (1).
+        C_3 = self.residual_convolution(C_1=(emb_head_real, emb_head_imag),
+                                        C_2=(emb_rel_real, emb_rel_imag))
+        a, b, c, d = C_3
+        # (4) Hermitian inner product with additive Conv2D connection,
+        # mirroring the torch.mm(a + ..., tail) structure of forward_k_vs_all.
+        real_real_real = ((a + emb_head_real * emb_rel_real) * emb_tail_real).sum(dim=1)
+        real_imag_imag = ((b + emb_head_real * emb_rel_imag) * emb_tail_imag).sum(dim=1)
+        imag_real_imag = ((c + emb_head_imag * emb_rel_real) * emb_tail_imag).sum(dim=1)
+        imag_imag_real = ((d + emb_head_imag * emb_rel_imag) * emb_tail_real).sum(dim=1)
+        return real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
+ + +
+[docs] + def forward_k_vs_sample(self, x: torch.Tensor, target_entity_idx: torch.Tensor): + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2) + emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2) + # (3) Apply convolution operation on (2). + C_3 = self.residual_convolution(C_1=(emb_head_real, emb_head_imag), + C_2=(emb_rel_real, emb_rel_imag)) + a, b, c, d = C_3 + + # (4) Retrieve selected tail entity embeddings + tail_entity_emb = self.normalize_tail_entity_embeddings(self.entity_embeddings(target_entity_idx)) + # (5) Split (4) into real and imaginary parts. + emb_tail_real, emb_tail_i = torch.tensor_split(tail_entity_emb, 2, dim=2) + # (6) Transpose (5) + emb_tail_real = emb_tail_real.transpose(1, 2) + emb_tail_i = emb_tail_i.transpose(1, 2) + # (7) Hermitian inner product with additive Conv2D connection + # (7.1) Elementwise multiply (2) according to the Hermitian Inner Product order + # (7.2) Additive connection: Add (3) into (7.1) + # (7.3) Batch matrix multiplication (7.2) and tail entity embeddings. + # https://pytorch.org/docs/stable/generated/torch.bmm.html + # input.shape (N, 1, D), mat2.shape (N,D,1) + real_real_real = torch.bmm((a + emb_head_real * emb_rel_real).unsqueeze(1), emb_tail_real) + real_imag_imag = torch.bmm((b + emb_head_real * emb_rel_imag).unsqueeze(1), emb_tail_i) + imag_real_imag = torch.bmm((c + emb_head_imag * emb_rel_real).unsqueeze(1), emb_tail_i) + imag_imag_real = torch.bmm((d + emb_head_imag * emb_rel_imag).unsqueeze(1), emb_tail_real) + score = real_real_real + real_imag_imag + imag_real_imag - imag_imag_real + # (N,1,1) => (N,1). + return score.squeeze(1)
+
+ + + +
+[docs] +class ComplEx(BaseKGE): + def __init__(self, args): + super().__init__(args) + self.name = 'ComplEx' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + +
+[docs] + def forward_triples(self, x: torch.LongTensor) -> torch.FloatTensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2) + emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2) + emb_tail_real, emb_tail_imag = torch.hsplit(tail_ent_emb, 2) + # (3) Compute hermitian inner product. + real_real_real = (emb_head_real * emb_rel_real * emb_tail_real).sum(dim=1) + real_imag_imag = (emb_head_real * emb_rel_imag * emb_tail_imag).sum(dim=1) + imag_real_imag = (emb_head_imag * emb_rel_real * emb_tail_imag).sum(dim=1) + imag_imag_real = (emb_head_imag * emb_rel_imag * emb_tail_real).sum(dim=1) + return real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
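+# Editorial sketch (not part of dicee): the four real-valued terms above are
+# exactly the real part of the Hermitian inner product Re(<h * r, conj(t)>):
+#
+#     >>> import torch
+#     >>> h, r, t = (torch.randn(3, 4, dtype=torch.cfloat) for _ in range(3))
+#     >>> complex_score = (h * r * t.conj()).real.sum(dim=1)
+#     >>> manual = ((h.real * r.real * t.real).sum(1) + (h.real * r.imag * t.imag).sum(1)
+#     ...           + (h.imag * r.real * t.imag).sum(1) - (h.imag * r.imag * t.real).sum(1))
+#     >>> torch.allclose(complex_score, manual, atol=1e-5)
+#     True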
+ + +
+[docs] + def forward_k_vs_all(self, x: torch.LongTensor) -> torch.FloatTensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_imag = torch.hsplit(head_ent_emb, 2) + emb_rel_real, emb_rel_imag = torch.hsplit(rel_ent_emb, 2) + # (3) Transpose Entity embedding matrix to perform matrix multiplications in Hermitian Product. + emb_tail_real, emb_tail_imag = torch.hsplit(self.entity_embeddings.weight, 2) + emb_tail_real, emb_tail_imag = emb_tail_real.transpose(1, 0), emb_tail_imag.transpose(1, 0) + # (4) Compute hermitian inner product on embedding vectors. + real_real_real = torch.mm(emb_head_real * emb_rel_real, emb_tail_real) + real_imag_imag = torch.mm(emb_head_real * emb_rel_imag, emb_tail_imag) + imag_real_imag = torch.mm(emb_head_imag * emb_rel_real, emb_tail_imag) + imag_imag_real = torch.mm(emb_head_imag * emb_rel_imag, emb_tail_real) + return real_real_real + real_imag_imag + imag_real_imag - imag_imag_real
+
\ No newline at end of file
diff --git a/_modules/dicee/models/function_space.html b/_modules/dicee/models/function_space.html
new file mode 100644
index 00000000..67da69e0
--- /dev/null
+++ b/_modules/dicee/models/function_space.html
@@ -0,0 +1,504 @@
+dicee.models.function_space - DICE Embeddings 0.0.6 documentation
Source code for dicee.models.function_space

+from .base_model import BaseKGE
+import torch
+import numpy as np
+from scipy.special import roots_legendre
+
+
+[docs]
+class FMult(BaseKGE):
+    """ Learning Neural Networks for Knowledge Graphs: each embedding vector parameterises a small neural network. """
+
+    def __init__(self, args):
+        super().__init__(args)
+        self.name = 'FMult'
+        self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim)
+        self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim)
+        self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data)
+        self.k = int(np.sqrt(self.embedding_dim // 2))
+        self.num_sample = 50
+        # self.gamma = torch.rand(self.k, self.num_sample)  # [0, 1) uniform => worse results
+        self.gamma = torch.randn(self.k, self.num_sample)  # N(0, 1)
+        roots, weights = roots_legendre(self.num_sample)
+        self.roots = torch.from_numpy(roots).repeat(self.k, 1).float()  # shape (self.k, self.num_sample)
+        self.weights = torch.from_numpy(weights).reshape(1, -1).float()  # shape (1, self.num_sample)
+[docs] + def compute_func(self, weights: torch.FloatTensor, x) -> torch.FloatTensor: + n = len(weights) + # Weights for two linear layers. + w1, w2 = torch.hsplit(weights, 2) + # (1) Construct two-layered neural network + w1 = w1.view(n, self.k, self.k) + w2 = w2.view(n, self.k, self.k) + # (2) Forward Pass + out1 = torch.tanh(w1 @ x) # torch.sigmoid => worse results + out2 = w2 @ out1 + return out2 # no non-linearity => better results
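+# Editorial sketch (not part of dicee): compute_func reads a flat d-dimensional
+# embedding as the weights of a two-layer k x k network with k = sqrt(d / 2).
+# For a hypothetical d = 8 (so k = 2) and 50 sample points:
+#
+#     >>> import torch
+#     >>> emb = torch.randn(5, 8)                 # batch of 5 embeddings
+#     >>> w1, w2 = torch.hsplit(emb, 2)
+#     >>> w1, w2 = w1.view(5, 2, 2), w2.view(5, 2, 2)
+#     >>> x = torch.randn(2, 50)                  # k x num_sample input grid
+#     >>> (w2 @ torch.tanh(w1 @ x)).shape
+#     torch.Size([5, 2, 50])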
+ + +
+[docs] + def chain_func(self, weights, x: torch.FloatTensor): + n = len(weights) + # Weights for two linear layers. + w1, w2 = torch.hsplit(weights, 2) + # (1) Construct two-layered neural network + w1 = w1.view(n, self.k, self.k) + w2 = w2.view(n, self.k, self.k) + # (2) Perform the forward pass + out1 = torch.tanh(torch.bmm(w1, x)) + out2 = torch.bmm(w2, out1) + return out2
+ + +
+[docs] + def forward_triples(self, idx_triple: torch.Tensor) -> torch.Tensor: + # (1) Retrieve embeddings: batch, \mathbb R^d + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(idx_triple) + # (2) Compute NNs on \Gamma + # Logits via FDistMult... + # h_x = self.compute_func(head_ent_emb, x=self.gamma) # batch, \mathbb{R}^k, |\Gamma| + # r_x = self.compute_func(rel_ent_emb, x=self.gamma) # batch, \mathbb{R}^k, |\Gamma| + # t_x = self.compute_func(tail_ent_emb, x=self.gamma) # batch, \mathbb{R}^k, |\Gamma| + # out = h_x * r_x * t_x # batch, \mathbb{R}^k, |gamma| + # (2) Compute NNs on \Gamma + self.gamma=self.gamma.to(head_ent_emb.device) + + h_x = self.compute_func(head_ent_emb, x=self.gamma) # batch, \mathbb{R}^k, |\Gamma| + t_x = self.compute_func(tail_ent_emb, x=self.gamma) # batch, \mathbb{R}^k, |\Gamma| + r_h_x = self.chain_func(weights=rel_ent_emb, x=h_x) # batch, \mathbb{R}^k, |\Gamma| + # (3) Compute |\Gamma| predictions + out = torch.sum(r_h_x * t_x, dim=1) # batch, |gamma| # + # (4) Average (3) over \Gamma + out = torch.mean(out, dim=1) # batch + return out
+
+ + +
+[docs]
+class GFMult(BaseKGE):
+    """ Learning Neural Networks for Knowledge Graphs, scored with Gauss-Legendre quadrature. """
+
+    def __init__(self, args):
+        super().__init__(args)
+        self.name = 'GFMult'
+        self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim)
+        self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim)
+        self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data)
+        self.k = int(np.sqrt(self.embedding_dim // 2))
+        self.num_sample = 250
+        roots, weights = roots_legendre(self.num_sample)
+        self.roots = torch.from_numpy(roots).repeat(self.k, 1).float()  # shape (self.k, self.num_sample)
+        self.weights = torch.from_numpy(weights).reshape(1, -1).float()  # shape (1, self.num_sample)
+[docs] + def compute_func(self, weights: torch.FloatTensor, x) -> torch.FloatTensor: + n = len(weights) + # Weights for two linear layers. + w1, w2 = torch.hsplit(weights, 2) + # (1) Construct two-layered neural network + w1 = w1.view(n, self.k, self.k) + w2 = w2.view(n, self.k, self.k) + # (2) Forward Pass + out1 = torch.tanh(w1 @ x) # torch.sigmoid => worse results + out2 = w2 @ out1 + return out2 # no non-linearity => better results
+ + +
+[docs] + def chain_func(self, weights, x: torch.FloatTensor): + n = len(weights) + # Weights for two linear layers. + w1, w2 = torch.hsplit(weights, 2) + # (1) Construct two-layered neural network + w1 = w1.view(n, self.k, self.k) + w2 = w2.view(n, self.k, self.k) + # (2) Perform the forward pass + out1 = torch.tanh(torch.bmm(w1, x)) + out2 = torch.bmm(w2, out1) + return out2
+ + +
+[docs] + def forward_triples(self, idx_triple: torch.Tensor) -> torch.Tensor: + # (1) Retrieve embeddings: batch, \mathbb R^d + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(idx_triple) + # (2) Compute NNs on \Gamma + self.roots=self.roots.to(head_ent_emb.device) + self.weights=self.weights.to(head_ent_emb.device) + + h_x = self.compute_func(head_ent_emb, x=self.roots) # batch, \mathbb{R}^k, |\Gamma| + t_x = self.compute_func(tail_ent_emb, x=self.roots) # batch, \mathbb{R}^k, |\Gamma| + r_h_x = self.chain_func(weights=rel_ent_emb, x=h_x) # batch, \mathbb{R}^k, |\Gamma| + # (3) Compute |\Gamma| predictions. + out = torch.sum(r_h_x * t_x, dim=1)*self.weights # batch, |gamma| # + # (4) Average (3) over \Gamma + out = torch.mean(out, dim=1) # batch + return out
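+# Editorial sketch (not part of dicee): roots_legendre returns the nodes and
+# weights of Gauss-Legendre quadrature, so sum(w_i * f(x_i)) approximates the
+# integral of f over [-1, 1]; e.g. for f(x) = x^2 the exact value is 2/3.
+#
+#     >>> from scipy.special import roots_legendre
+#     >>> roots, weights = roots_legendre(50)
+#     >>> round(float((weights * roots ** 2).sum()), 6)
+#     0.666667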
+
+ + + +
+[docs]
+class FMult2(BaseKGE):
+    """ Learning Neural Networks for Knowledge Graphs with multi-layer networks and several scoring functions. """
+
+    def __init__(self, args):
+        super().__init__(args)
+        self.name = 'FMult2'
+        self.n_layers = 3
+        tuned_embedding_dim = False
+        while int(np.sqrt((self.embedding_dim - 1) / self.n_layers)) != np.sqrt(
+                (self.embedding_dim - 1) / self.n_layers):
+            self.embedding_dim += 1
+            tuned_embedding_dim = True
+        if tuned_embedding_dim:
+            print(f"\n\n*****Embedding dimension reset to {self.embedding_dim} to fit the model architecture!*****\n")
+        self.k = int(np.sqrt((self.embedding_dim - 1) // self.n_layers))
+        self.n = 50
+        self.a, self.b = -1.0, 1.0
+        # self.score_func = "vtp"  # "vector triple product"
+        # self.score_func = "trilinear"
+        self.score_func = "compositional"
+        # self.score_func = "full-compositional"
+        # self.discrete_points = torch.linspace(self.a, self.b, steps=self.n)
+        self.discrete_points = torch.linspace(self.a, self.b, steps=self.n).repeat(self.k, 1)
+
+        self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim)
+        self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim)
+        self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data)
+[docs] + def build_func(self, Vec): + n = len(Vec) + # (1) Construct self.n_layers layered neural network + W = list(torch.hsplit(Vec[:, :-1], self.n_layers)) + # (2) Reshape weights of the layers + for i, w in enumerate(W): + W[i] = w.reshape(n, self.k, self.k) + return W, Vec[:, -1]
+ + +
+[docs] + def build_chain_funcs(self, list_Vec): + list_W = [] + list_b = [] + for Vec in list_Vec: + W_, b = self.build_func(Vec) + list_W.append(W_) + list_b.append(b) + + W = list_W[-1][1:] + for i in range(len(list_W) - 1): + for j, w in enumerate(list_W[i]): + if i == 0 and j == 0: + W_temp = w + else: + W_temp = w @ W_temp + W_temp = W_temp + list_b[i].reshape(-1, 1, 1) + W_temp = list_W[-1][0] @ W_temp / ((len(list_Vec) - 1) * w.shape[1]) + W.insert(0, W_temp) + return W, list_b[-1]
+ + +
+[docs] + def compute_func(self, W, b, x) -> torch.FloatTensor: + out = W[0] @ x + for i, w in enumerate(W[1:]): + if i % 2 == 0: # no non-linearity => better results + out = out + torch.tanh(w @ out) + else: + out = out + w @ out + return out + b.reshape(-1, 1, 1)
+ + +
+[docs] + def function(self, list_W, list_b): + def f(x): + if len(list_W) == 1: + return self.compute_func(list_W[0], list_b[0], x) + score = self.compute_func(list_W[0], list_b[0], x) + for W, b in zip(list_W[1:], list_b[1:]): + score = score * self.compute_func(W, b, x) + return score + + return f
+ + +
+[docs] + def trapezoid(self, list_W, list_b): + return torch.trapezoid(self.function(list_W, list_b)(self.discrete_points), x=self.discrete_points, dim=-1).sum( + dim=-1)
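+# Editorial sketch (not part of dicee): torch.trapezoid integrates sampled
+# function values over the given points; e.g. x^2 on [-1, 1] (exact value 2/3):
+#
+#     >>> import torch
+#     >>> xs = torch.linspace(-1.0, 1.0, steps=101)
+#     >>> round(torch.trapezoid(xs ** 2, x=xs).item(), 3)
+#     0.667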
+ + +
+[docs] + def forward_triples(self, idx_triple: torch.Tensor) -> torch.Tensor: + # (1) Retrieve embeddings: batch, \mathbb R^d + head_ent_emb, rel_emb, tail_ent_emb = self.get_triple_representation(idx_triple) + if self.discrete_points.device != head_ent_emb.device: + self.discrete_points = self.discrete_points.to(head_ent_emb.device) + if self.score_func == "vtp": + h_W, h_b = self.build_func(head_ent_emb) + r_W, r_b = self.build_func(rel_emb) + t_W, t_b = self.build_func(tail_ent_emb) + out = -self.trapezoid([t_W], [t_b]) * self.trapezoid([h_W, r_W], [h_b, r_b]) + self.trapezoid([r_W], [ + r_b]) * self.trapezoid([t_W, h_W], [t_b, h_b]) + elif self.score_func == "compositional": + t_W, t_b = self.build_func(tail_ent_emb) + chain_W, chain_b = self.build_chain_funcs([head_ent_emb, rel_emb]) + out = self.trapezoid([chain_W, t_W], [chain_b, t_b]) + elif self.score_func == "full-compositional": + chain_W, chain_b = self.build_chain_funcs([head_ent_emb, rel_emb, tail_ent_emb]) + out = self.trapezoid([chain_W], [chain_b]) + elif self.score_func == "trilinear": + h_W, h_b = self.build_func(head_ent_emb) + r_W, r_b = self.build_func(rel_emb) + t_W, t_b = self.build_func(tail_ent_emb) + out = self.trapezoid([h_W, r_W, t_W], [h_b, r_b, t_b]) + return out
+
\ No newline at end of file
diff --git a/_modules/dicee/models/octonion.html b/_modules/dicee/models/octonion.html
new file mode 100644
index 00000000..5f5ab926
--- /dev/null
+++ b/_modules/dicee/models/octonion.html
@@ -0,0 +1,753 @@
+dicee.models.octonion - DICE Embeddings 0.0.6 documentation
Source code for dicee.models.octonion

+import torch
+from .base_model import BaseKGE, IdentityClass
+
+
+
+[docs] +def octonion_mul(*, O_1, O_2): + x0, x1, x2, x3, x4, x5, x6, x7 = O_1 + y0, y1, y2, y3, y4, y5, y6, y7 = O_2 + x = x0 * y0 - x1 * y1 - x2 * y2 - x3 * y3 - x4 * y4 - x5 * y5 - x6 * y6 - x7 * y7 + e1 = x0 * y1 + x1 * y0 + x2 * y3 - x3 * y2 + x4 * y5 - x5 * y4 - x6 * y7 + x7 * y6 + e2 = x0 * y2 - x1 * y3 + x2 * y0 + x3 * y1 + x4 * y6 + x5 * y7 - x6 * y4 - x7 * y5 + e3 = x0 * y3 + x1 * y2 - x2 * y1 + x3 * y0 + x4 * y7 - x5 * y6 + x6 * y5 - x7 * y4 + e4 = x0 * y4 - x1 * y5 - x2 * y6 - x3 * y7 + x4 * y0 + x5 * y1 + x6 * y2 + x7 * y3 + e5 = x0 * y5 + x1 * y4 - x2 * y7 + x3 * y6 - x4 * y1 + x5 * y0 - x6 * y3 + x7 * y2 + e6 = x0 * y6 + x1 * y7 + x2 * y4 - x3 * y5 - x4 * y2 + x5 * y3 + x6 * y0 - x7 * y1 + e7 = x0 * y7 - x1 * y6 + x2 * y5 + x3 * y4 - x4 * y3 - x5 * y2 + x6 * y1 + x7 * y0 + + return x, e1, e2, e3, e4, e5, e6, e7
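+# Editorial sketch (not part of dicee): octonions form a composition algebra,
+# so the norm is multiplicative, |O_1 * O_2| = |O_1| * |O_2|; a quick check:
+#
+#     >>> import torch
+#     >>> O_1 = tuple(torch.randn(1) for _ in range(8))
+#     >>> O_2 = tuple(torch.randn(1) for _ in range(8))
+#     >>> norm = lambda o: sum(c ** 2 for c in o).sqrt()
+#     >>> out = octonion_mul(O_1=O_1, O_2=O_2)
+#     >>> torch.allclose(norm(out), norm(O_1) * norm(O_2), atol=1e-5)
+#     True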
+ + + +
+[docs] +def octonion_mul_norm(*, O_1, O_2): + x0, x1, x2, x3, x4, x5, x6, x7 = O_1 + y0, y1, y2, y3, y4, y5, y6, y7 = O_2 + + # Normalize the relation to eliminate the scaling effect, may cause Nan due to floating point. + denominator = torch.sqrt(y0 ** 2 + y1 ** 2 + y2 ** 2 + y3 ** 2 + y4 ** 2 + y5 ** 2 + y6 ** 2 + y7 ** 2) + y0 = y0 / denominator + y1 = y1 / denominator + y2 = y2 / denominator + y3 = y3 / denominator + y4 = y4 / denominator + y5 = y5 / denominator + y6 = y6 / denominator + y7 = y7 / denominator + + x = x0 * y0 - x1 * y1 - x2 * y2 - x3 * y3 - x4 * y4 - x5 * y5 - x6 * y6 - x7 * y7 + e1 = x0 * y1 + x1 * y0 + x2 * y3 - x3 * y2 + x4 * y5 - x5 * y4 - x6 * y7 + x7 * y6 + e2 = x0 * y2 - x1 * y3 + x2 * y0 + x3 * y1 + x4 * y6 + x5 * y7 - x6 * y4 - x7 * y5 + e3 = x0 * y3 + x1 * y2 - x2 * y1 + x3 * y0 + x4 * y7 - x5 * y6 + x6 * y5 - x7 * y4 + e4 = x0 * y4 - x1 * y5 - x2 * y6 - x3 * y7 + x4 * y0 + x5 * y1 + x6 * y2 + x7 * y3 + e5 = x0 * y5 + x1 * y4 - x2 * y7 + x3 * y6 - x4 * y1 + x5 * y0 - x6 * y3 + x7 * y2 + e6 = x0 * y6 + x1 * y7 + x2 * y4 - x3 * y5 - x4 * y2 + x5 * y3 + x6 * y0 - x7 * y1 + e7 = x0 * y7 - x1 * y6 + x2 * y5 + x3 * y4 - x4 * y3 - x5 * y2 + x6 * y1 + x7 * y0 + + return x, e1, e2, e3, e4, e5, e6, e7
+ + + +
+[docs] +class OMult(BaseKGE): + def __init__(self, args): + super().__init__(args) + self.name = 'OMult' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + +
+[docs] + def octonion_normalizer(self, emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, + emb_rel_e7): + denominator = torch.sqrt( + emb_rel_e0 ** 2 + emb_rel_e1 ** 2 + emb_rel_e2 ** 2 + emb_rel_e3 ** 2 + emb_rel_e4 ** 2 + + emb_rel_e5 ** 2 + emb_rel_e6 ** 2 + emb_rel_e7 ** 2) + y0 = emb_rel_e0 / denominator + y1 = emb_rel_e1 / denominator + y2 = emb_rel_e2 / denominator + y3 = emb_rel_e3 / denominator + y4 = emb_rel_e4 / denominator + y5 = emb_rel_e5 / denominator + y6 = emb_rel_e6 / denominator + y7 = emb_rel_e7 / denominator + return y0, y1, y2, y3, y4, y5, y6, y7
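+# Editorial sketch (not part of dicee): octonion_normalizer divides each of the
+# eight components by the octonion norm; the same arithmetic yields unit norm:
+#
+#     >>> import torch
+#     >>> comps = tuple(torch.randn(4, 16) for _ in range(8))
+#     >>> denom = torch.sqrt(sum(c ** 2 for c in comps))
+#     >>> unit = [c / denom for c in comps]
+#     >>> torch.allclose(sum(c ** 2 for c in unit), torch.ones(4, 16))
+#     True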
+ + +
+[docs]
+    def forward_triples(self, x: torch.Tensor) -> torch.Tensor:
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) Split (1) into the eight octonion components.
+        (emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+         emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7) = torch.hsplit(head_ent_emb, 8)
+        (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+         emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = torch.hsplit(rel_ent_emb, 8)
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = self.octonion_normalizer(
+                emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7)
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = torch.hsplit(tail_ent_emb, 8)
+        # (3) Octonion multiplication of the head and relation components.
+        e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
+            O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                 emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+            O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                 emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        # (4) Inner product of (3) with the tail components.
+        e0_score = (e0 * emb_tail_e0).sum(dim=1)
+        e1_score = (e1 * emb_tail_e1).sum(dim=1)
+        e2_score = (e2 * emb_tail_e2).sum(dim=1)
+        e3_score = (e3 * emb_tail_e3).sum(dim=1)
+        e4_score = (e4 * emb_tail_e4).sum(dim=1)
+        e5_score = (e5 * emb_tail_e5).sum(dim=1)
+        e6_score = (e6 * emb_tail_e6).sum(dim=1)
+        e7_score = (e7 * emb_tail_e7).sum(dim=1)
+        return e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
+ + +
+[docs]
+    def forward_k_vs_all(self, x: torch.Tensor):
+        """
+        Given a head entity and a relation (h, r), compute scores for all entities.
+        [score(h, r, x) | x \in Entities] => [0.0, 0.1, ..., 0.8], shape => (1, |Entities|)
+        Given a batch of head entities and relations => shape (batch size, |Entities|)
+        """
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) Split (1) into the eight octonion components.
+        (emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+         emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7) = torch.hsplit(head_ent_emb, 8)
+        (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+         emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = torch.hsplit(rel_ent_emb, 8)
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = self.octonion_normalizer(
+                emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7)
+        # (3) Apply octonion multiplication on the head and relation components.
+        e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
+            O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                 emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+            O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                 emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        # (4) Prepare all entity embeddings and transpose them for matrix multiplication.
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = torch.hsplit(self.entity_embeddings.weight, 8)
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = (
+            emb_tail_e0.transpose(1, 0), emb_tail_e1.transpose(1, 0),
+            emb_tail_e2.transpose(1, 0), emb_tail_e3.transpose(1, 0),
+            emb_tail_e4.transpose(1, 0), emb_tail_e5.transpose(1, 0),
+            emb_tail_e6.transpose(1, 0), emb_tail_e7.transpose(1, 0))
+        # (5) Inner product of (3) with all tail components.
+        e0_score = torch.mm(e0, emb_tail_e0)
+        e1_score = torch.mm(e1, emb_tail_e1)
+        e2_score = torch.mm(e2, emb_tail_e2)
+        e3_score = torch.mm(e3, emb_tail_e3)
+        e4_score = torch.mm(e4, emb_tail_e4)
+        e5_score = torch.mm(e5, emb_tail_e5)
+        e6_score = torch.mm(e6, emb_tail_e6)
+        e7_score = torch.mm(e7, emb_tail_e7)
+        return e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
+
+ + + +
+[docs] +class ConvO(BaseKGE): + def __init__(self, args: dict): + super().__init__(args=args) + self.name = 'ConvO' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + # Convolution + self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_of_output_channels, + kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True) + self.fc_num_input = self.embedding_dim * 2 * self.num_of_output_channels + self.fc1 = torch.nn.Linear(self.fc_num_input, self.embedding_dim) # Hard compression. + self.bn_conv2d = torch.nn.BatchNorm2d(self.num_of_output_channels) + self.norm_fc1 = self.normalizer_class(self.embedding_dim) + self.feature_map_dropout = torch.nn.Dropout2d(self.feature_map_dropout_rate) + +
+[docs] + def octonion_normalizer(self, emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, + emb_rel_e7): + denominator = torch.sqrt( + emb_rel_e0 ** 2 + emb_rel_e1 ** 2 + emb_rel_e2 ** 2 + emb_rel_e3 ** 2 + emb_rel_e4 ** 2 + + emb_rel_e5 ** 2 + emb_rel_e6 ** 2 + emb_rel_e7 ** 2) + y0 = emb_rel_e0 / denominator + y1 = emb_rel_e1 / denominator + y2 = emb_rel_e2 / denominator + y3 = emb_rel_e3 / denominator + y4 = emb_rel_e4 / denominator + y5 = emb_rel_e5 / denominator + y6 = emb_rel_e6 / denominator + y7 = emb_rel_e7 / denominator + return y0, y1, y2, y3, y4, y5, y6, y7
+ + +
+[docs] + def residual_convolution(self, O_1, O_2): + emb_ent_e0, emb_ent_e1, emb_ent_e2, emb_ent_e3, emb_ent_e4, emb_ent_e5, emb_ent_e6, emb_ent_e7 = O_1 + emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7 = O_2 + x = torch.cat([emb_ent_e0.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e1.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e2.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e3.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e4.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e5.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e6.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e7.view(-1, 1, 1, self.embedding_dim // 8), # entities + emb_rel_e0.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e1.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e2.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e3.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e4.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e5.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e6.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e7.view(-1, 1, 1, self.embedding_dim // 8), ], 2) + x = torch.nn.functional.relu(self.bn_conv2d(self.conv2d(x))) + x = self.feature_map_dropout(x) + x = x.view(x.shape[0], -1) # reshape for NN. + x = torch.nn.functional.relu(self.norm_fc1(self.fc1(x))) + return torch.chunk(x, 8, dim=1)
+ + +
+[docs]
+    def forward_triples(self, x: torch.Tensor) -> torch.Tensor:
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) Split (1) into the eight octonion components.
+        (emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+         emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7) = torch.hsplit(head_ent_emb, 8)
+        (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+         emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = torch.hsplit(rel_ent_emb, 8)
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = self.octonion_normalizer(
+                emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7)
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = torch.hsplit(tail_ent_emb, 8)
+        # (3) Apply the convolution operation on the head and relation components.
+        O_3 = self.residual_convolution(O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                                             emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+                                        O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                                             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        conv_e0, conv_e1, conv_e2, conv_e3, conv_e4, conv_e5, conv_e6, conv_e7 = O_3
+        # (4) Apply octonion multiplication on the head and relation components.
+        e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
+            O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                 emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+            O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                 emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        # (5) Hadamard product of (3) and (4), followed by the inner product with the tail components.
+        e0_score = (conv_e0 * e0 * emb_tail_e0).sum(dim=1)
+        e1_score = (conv_e1 * e1 * emb_tail_e1).sum(dim=1)
+        e2_score = (conv_e2 * e2 * emb_tail_e2).sum(dim=1)
+        e3_score = (conv_e3 * e3 * emb_tail_e3).sum(dim=1)
+        e4_score = (conv_e4 * e4 * emb_tail_e4).sum(dim=1)
+        e5_score = (conv_e5 * e5 * emb_tail_e5).sum(dim=1)
+        e6_score = (conv_e6 * e6 * emb_tail_e6).sum(dim=1)
+        e7_score = (conv_e7 * e7 * emb_tail_e7).sum(dim=1)
+        return e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
+ + +
+[docs]
+    def forward_k_vs_all(self, x: torch.Tensor):
+        """
+        Given a head entity and a relation (h, r), compute scores for all entities.
+        [score(h, r, x) | x \in Entities] => [0.0, 0.1, ..., 0.8], shape => (1, |Entities|)
+        Given a batch of head entities and relations => shape (batch size, |Entities|)
+        """
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) Split (1) into the eight octonion components.
+        (emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+         emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7) = torch.hsplit(head_ent_emb, 8)
+        (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+         emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = torch.hsplit(rel_ent_emb, 8)
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = self.octonion_normalizer(
+                emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7)
+        # (3) Apply the convolution operation on the head and relation components.
+        O_3 = self.residual_convolution(O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                                             emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+                                        O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                                             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        conv_e0, conv_e1, conv_e2, conv_e3, conv_e4, conv_e5, conv_e6, conv_e7 = O_3
+        # (4) Apply octonion multiplication on the head and relation components.
+        e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
+            O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                 emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+            O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                 emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        # (5) Prepare all entity embeddings and transpose them for matrix multiplication.
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = torch.hsplit(self.entity_embeddings.weight, 8)
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = (
+            emb_tail_e0.transpose(1, 0), emb_tail_e1.transpose(1, 0),
+            emb_tail_e2.transpose(1, 0), emb_tail_e3.transpose(1, 0),
+            emb_tail_e4.transpose(1, 0), emb_tail_e5.transpose(1, 0),
+            emb_tail_e6.transpose(1, 0), emb_tail_e7.transpose(1, 0))
+        # (6) Hadamard product of (3) and (4), followed by the inner product with all tail components.
+        e0_score = torch.mm(conv_e0 * e0, emb_tail_e0)
+        e1_score = torch.mm(conv_e1 * e1, emb_tail_e1)
+        e2_score = torch.mm(conv_e2 * e2, emb_tail_e2)
+        e3_score = torch.mm(conv_e3 * e3, emb_tail_e3)
+        e4_score = torch.mm(conv_e4 * e4, emb_tail_e4)
+        e5_score = torch.mm(conv_e5 * e5, emb_tail_e5)
+        e6_score = torch.mm(conv_e6 * e6, emb_tail_e6)
+        e7_score = torch.mm(conv_e7 * e7, emb_tail_e7)
+        return e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
+
+ + + +
+[docs] +class AConvO(BaseKGE): + """ Additive Convolutional Octonion Knowledge Graph Embeddings """ + + def __init__(self, args: dict): + super().__init__(args=args) + self.name = 'AConvO' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + # Convolution + self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_of_output_channels, + kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True) + self.fc_num_input = self.embedding_dim * 2 * self.num_of_output_channels + self.fc1 = torch.nn.Linear(self.fc_num_input, self.embedding_dim) # Hard compression. + self.bn_conv2d = torch.nn.BatchNorm2d(self.num_of_output_channels) + self.norm_fc1 = self.normalizer_class(self.embedding_dim) + self.feature_map_dropout = torch.nn.Dropout2d(self.feature_map_dropout_rate) + +
+[docs] + def octonion_normalizer(self, emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, + emb_rel_e7): + denominator = torch.sqrt( + emb_rel_e0 ** 2 + emb_rel_e1 ** 2 + emb_rel_e2 ** 2 + + emb_rel_e3 ** 2 + emb_rel_e4 ** 2 + emb_rel_e5 ** 2 + emb_rel_e6 ** 2 + emb_rel_e7 ** 2) + y0 = emb_rel_e0 / denominator + y1 = emb_rel_e1 / denominator + y2 = emb_rel_e2 / denominator + y3 = emb_rel_e3 / denominator + y4 = emb_rel_e4 / denominator + y5 = emb_rel_e5 / denominator + y6 = emb_rel_e6 / denominator + y7 = emb_rel_e7 / denominator + return y0, y1, y2, y3, y4, y5, y6, y7
+ + +
+[docs] + def residual_convolution(self, O_1, O_2): + emb_ent_e0, emb_ent_e1, emb_ent_e2, emb_ent_e3, emb_ent_e4, emb_ent_e5, emb_ent_e6, emb_ent_e7 = O_1 + emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3, emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7 = O_2 + x = torch.cat([emb_ent_e0.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e1.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e2.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e3.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e4.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e5.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e6.view(-1, 1, 1, self.embedding_dim // 8), + emb_ent_e7.view(-1, 1, 1, self.embedding_dim // 8), # entities + emb_rel_e0.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e1.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e2.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e3.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e4.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e5.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e6.view(-1, 1, 1, self.embedding_dim // 8), + emb_rel_e7.view(-1, 1, 1, self.embedding_dim // 8), ], 2) + x = torch.nn.functional.relu(self.bn_conv2d(self.conv2d(x))) + x = self.feature_map_dropout(x) + x = x.view(x.shape[0], -1) # reshape for NN. + x = torch.nn.functional.relu(self.norm_fc1(self.fc1(x))) + return torch.chunk(x, 8, dim=1)
+ + +
+[docs]
+    def forward_triples(self, x: torch.Tensor) -> torch.Tensor:
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) Split (1) into the eight octonion components.
+        (emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+         emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7) = torch.hsplit(head_ent_emb, 8)
+        (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+         emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = torch.hsplit(rel_ent_emb, 8)
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = self.octonion_normalizer(
+                emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7)
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = torch.hsplit(tail_ent_emb, 8)
+        # (3) Apply the convolution operation on the head and relation components.
+        O_3 = self.residual_convolution(O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                                             emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+                                        O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                                             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        conv_e0, conv_e1, conv_e2, conv_e3, conv_e4, conv_e5, conv_e6, conv_e7 = O_3
+        # (4) Apply octonion multiplication on the head and relation components.
+        e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
+            O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                 emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+            O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                 emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        # (5) Additive connection: add (3) to the product of (4) with the tail components, then sum.
+        e0_score = (conv_e0 + e0 * emb_tail_e0).sum(dim=1)
+        e1_score = (conv_e1 + e1 * emb_tail_e1).sum(dim=1)
+        e2_score = (conv_e2 + e2 * emb_tail_e2).sum(dim=1)
+        e3_score = (conv_e3 + e3 * emb_tail_e3).sum(dim=1)
+        e4_score = (conv_e4 + e4 * emb_tail_e4).sum(dim=1)
+        e5_score = (conv_e5 + e5 * emb_tail_e5).sum(dim=1)
+        e6_score = (conv_e6 + e6 * emb_tail_e6).sum(dim=1)
+        e7_score = (conv_e7 + e7 * emb_tail_e7).sum(dim=1)
+        return e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
+ + +
+[docs]
+    def forward_k_vs_all(self, x: torch.Tensor):
+        """
+        Given a head entity and a relation (h, r), compute scores for all entities.
+        [score(h, r, x) | x \in Entities] => [0.0, 0.1, ..., 0.8], shape => (1, |Entities|)
+        Given a batch of head entities and relations => shape (batch size, |Entities|)
+        """
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) Split (1) into the eight octonion components.
+        (emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+         emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7) = torch.hsplit(head_ent_emb, 8)
+        (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+         emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = torch.hsplit(rel_ent_emb, 8)
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            (emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7) = self.octonion_normalizer(
+                emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7)
+        # (3) Apply the convolution operation on the head and relation components.
+        O_3 = self.residual_convolution(O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                                             emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+                                        O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                                             emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        conv_e0, conv_e1, conv_e2, conv_e3, conv_e4, conv_e5, conv_e6, conv_e7 = O_3
+        # (4) Apply octonion multiplication on the head and relation components.
+        e0, e1, e2, e3, e4, e5, e6, e7 = octonion_mul(
+            O_1=(emb_head_e0, emb_head_e1, emb_head_e2, emb_head_e3,
+                 emb_head_e4, emb_head_e5, emb_head_e6, emb_head_e7),
+            O_2=(emb_rel_e0, emb_rel_e1, emb_rel_e2, emb_rel_e3,
+                 emb_rel_e4, emb_rel_e5, emb_rel_e6, emb_rel_e7))
+        # (5) Prepare all entity embeddings and transpose them for matrix multiplication.
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = torch.hsplit(self.entity_embeddings.weight, 8)
+        (emb_tail_e0, emb_tail_e1, emb_tail_e2, emb_tail_e3,
+         emb_tail_e4, emb_tail_e5, emb_tail_e6, emb_tail_e7) = (
+            emb_tail_e0.transpose(1, 0), emb_tail_e1.transpose(1, 0),
+            emb_tail_e2.transpose(1, 0), emb_tail_e3.transpose(1, 0),
+            emb_tail_e4.transpose(1, 0), emb_tail_e5.transpose(1, 0),
+            emb_tail_e6.transpose(1, 0), emb_tail_e7.transpose(1, 0))
+        # (6) Additive connection: add (3) to (4), then take the inner product with all tail components.
+        e0_score = torch.mm(conv_e0 + e0, emb_tail_e0)
+        e1_score = torch.mm(conv_e1 + e1, emb_tail_e1)
+        e2_score = torch.mm(conv_e2 + e2, emb_tail_e2)
+        e3_score = torch.mm(conv_e3 + e3, emb_tail_e3)
+        e4_score = torch.mm(conv_e4 + e4, emb_tail_e4)
+        e5_score = torch.mm(conv_e5 + e5, emb_tail_e5)
+        e6_score = torch.mm(conv_e6 + e6, emb_tail_e6)
+        e7_score = torch.mm(conv_e7 + e7, emb_tail_e7)
+        return e0_score + e1_score + e2_score + e3_score + e4_score + e5_score + e6_score + e7_score
\ No newline at end of file
diff --git a/_modules/dicee/models/pykeen_models.html b/_modules/dicee/models/pykeen_models.html
new file mode 100644
index 00000000..f4014498
--- /dev/null
+++ b/_modules/dicee/models/pykeen_models.html
@@ -0,0 +1,352 @@
+dicee.models.pykeen_models - DICE Embeddings 0.0.6 documentation

Source code for dicee.models.pykeen_models

+import torch
+import torch.utils.data
+from pykeen.models import model_resolver
+from .base_model import BaseKGE
+from collections import namedtuple
+
+
+[docs]
+class PykeenKGE(BaseKGE):
+    """ A class for using knowledge graph embedding models implemented in Pykeen.
+
+    Notes:
+        Supported models include Pykeen_DistMult, Pykeen_ComplEx, Pykeen_QuatE,
+        Pykeen_MuRE, Pykeen_CP, Pykeen_HolE, Pykeen_BoxE, Pykeen_ProjE,
+        Pykeen_RotatE, and Pykeen_TransE.
+    """
+
+    def __init__(self, args: dict):
+        super().__init__(args)
+        self.model_kwargs = {'embedding_dim': args['embedding_dim'],
+                             'entity_initializer': None if args['init_param'] is None else torch.nn.init.xavier_normal_,
+                             "random_seed": args["random_seed"]}
+        self.model_kwargs.update(args['pykeen_model_kwargs'])
+        self.name = args['model'].split("_")[1]
+        # Work around the memory issue of Pykeen models caused by their regularizers.
+        # See https://github.com/pykeen/pykeen/issues/1297
+        if self.name == "MuRE":
+            # No regularizer => no memory leakage.
+            # https://pykeen.readthedocs.io/en/stable/api/pykeen.models.MuRE.html
+            pass
+        elif self.name == "QuatE":
+            self.model_kwargs["entity_regularizer"] = None
+            self.model_kwargs["relation_regularizer"] = None
+        elif self.name == "DistMult":
+            self.model_kwargs["regularizer"] = None
+        elif self.name == "BoxE":
+            pass
+        elif self.name == "CP":
+            # No regularizers.
+            pass
+        elif self.name == "HolE":
+            # No regularizers.
+            pass
+        elif self.name == "ProjE":
+            # Nothing to disable.
+            pass
+        elif self.name == "RotatE":
+            pass
+        elif self.name == "TransE":
+            self.model_kwargs["regularizer"] = None
+        else:
+            print("Some Pykeen models leak memory through their regularizer implementations.")
+            print(f"{self.name} does not seem to have a regularizer that needs to be disabled.")
+
+        self.model = model_resolver.make(
+            self.name, self.model_kwargs,
+            triples_factory=namedtuple('triples_factory',
+                                       ['num_entities', 'num_relations', 'create_inverse_triples'])(
+                self.num_entities, self.num_relations, False))
+        self.loss_history = []
+        self.args = args
+        self.entity_embeddings = None
+        self.relation_embeddings = None
+        for (k, v) in self.model.named_modules():
+            if "entity_representations" == k:
+                self.entity_embeddings = v[0]._embeddings
+            elif "relation_representations" == k:
+                self.relation_embeddings = v[0]._embeddings
+            elif "interaction" == k:
+                self.interaction = v
+            else:
+                pass
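+# Editorial sketch mirroring the construction above (the entity/relation counts
+# and dimension are arbitrary placeholders, not values used by dicee):
+#
+#     >>> from collections import namedtuple
+#     >>> from pykeen.models import model_resolver
+#     >>> tf = namedtuple('triples_factory',
+#     ...                 ['num_entities', 'num_relations', 'create_inverse_triples'])(14, 55, False)
+#     >>> model = model_resolver.make('DistMult', {'embedding_dim': 32}, triples_factory=tf)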
+[docs]
+    def forward_k_vs_all(self, x: torch.LongTensor):
+        """
+        An explicit alternative, kept for reference; it would allow applying batch normalization and dropout:
+
+        # (1) Retrieve embeddings of heads and relations + apply Dropout & Normalization if given.
+        h, r = self.get_head_relation_representation(x)
+        # (2) Reshape (1).
+        if self.last_dim > 0:
+            h = h.reshape(len(x), self.embedding_dim, self.last_dim)
+            r = r.reshape(len(x), self.embedding_dim, self.last_dim)
+        # (3) Reshape all entities.
+        if self.last_dim > 0:
+            t = self.entity_embeddings.weight.reshape(self.num_entities, self.embedding_dim, self.last_dim)
+        else:
+            t = self.entity_embeddings.weight
+        # (4) Call score_t from the interaction to generate triple scores.
+        return self.interaction.score_t(h=h, r=r, all_entities=t, slice_size=1)
+        """
+        return self.model.score_t(x)
+ + +
+[docs]
+    def forward_triples(self, x: torch.LongTensor) -> torch.FloatTensor:
+        """
+        An explicit alternative, kept for reference; it would allow applying batch normalization and dropout:
+
+        # (1) Retrieve embeddings of heads, relations and tails and apply Dropout & Normalization if given.
+        h, r, t = self.get_triple_representation(x)
+        # (2) Reshape (1).
+        if self.last_dim > 0:
+            h = h.reshape(len(x), self.embedding_dim, self.last_dim)
+            r = r.reshape(len(x), self.embedding_dim, self.last_dim)
+            t = t.reshape(len(x), self.embedding_dim, self.last_dim)
+        # (3) Compute the triple score.
+        return self.interaction.score(h=h, r=r, t=t, slice_size=None, slice_dim=0)
+        """
+        return self.model.score_hrt(hrt_batch=x, mode=None).flatten()
+ + +
+[docs]
+    def forward_k_vs_sample(self, x: torch.LongTensor, target_entity_idx):
+        raise NotImplementedError(f"KvsSample has not yet been implemented for {self.name}")
+
\ No newline at end of file
diff --git a/_modules/dicee/models/quaternion.html b/_modules/dicee/models/quaternion.html
new file mode 100644
index 00000000..584e0e51
--- /dev/null
+++ b/_modules/dicee/models/quaternion.html
@@ -0,0 +1,686 @@
+dicee.models.quaternion - DICE Embeddings 0.0.6 documentation

Source code for dicee.models.quaternion

+import torch
+from .static_funcs import quaternion_mul
+from .base_model import BaseKGE, IdentityClass
+
+
+
+[docs]
+def quaternion_mul_with_unit_norm(*, Q_1, Q_2):
+    a_h, b_h, c_h, d_h = Q_1  # = {a_h + b_h i + c_h j + d_h k : a_h, b_h, c_h, d_h \in R^k}
+    a_r, b_r, c_r, d_r = Q_2  # = {a_r + b_r i + c_r j + d_r k : a_r, b_r, c_r, d_r \in R^k}
+
+    # Normalize the relation to eliminate the scaling effect.
+    denominator = torch.sqrt(a_r ** 2 + b_r ** 2 + c_r ** 2 + d_r ** 2)
+    p = a_r / denominator
+    q = b_r / denominator
+    u = c_r / denominator
+    v = d_r / denominator
+    # Q' = Hamilton product of the head quaternion and the unit-norm relation quaternion.
+    r_val = a_h * p - b_h * q - c_h * u - d_h * v
+    i_val = a_h * q + b_h * p + c_h * v - d_h * u
+    j_val = a_h * u - b_h * v + c_h * p + d_h * q
+    k_val = a_h * v + b_h * u - c_h * q + d_h * p
+    return r_val, i_val, j_val, k_val
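+# Editorial sketch (not part of dicee): the quaternion norm is multiplicative,
+# so multiplying by a unit-norm relation preserves the norm of the head:
+#
+#     >>> import torch
+#     >>> Q_1 = tuple(torch.randn(3, 5) for _ in range(4))
+#     >>> Q_2 = tuple(torch.randn(3, 5) for _ in range(4))
+#     >>> out = quaternion_mul_with_unit_norm(Q_1=Q_1, Q_2=Q_2)
+#     >>> torch.allclose(sum(c ** 2 for c in out), sum(c ** 2 for c in Q_1), atol=1e-5)
+#     True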
+ + + +
+[docs] +class QMult(BaseKGE): + def __init__(self, args): + super().__init__(args) + self.name = 'QMult' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + self.explicit = True + if self.explicit is False: + _1, _i, _j, _k = 0, 1, 2, 3 + self.multiplication_table = torch.zeros(4, 4, 4) + for i, j, k, v in [ + # 1 * ? = ?; ? * 1 = ? + (_1, _1, _1, 1), + (_1, _i, _i, 1), + (_1, _j, _j, 1), + (_1, _k, _k, 1), + (_i, _1, _i, 1), + (_j, _1, _j, 1), + (_k, _1, _k, 1), + # i**2 = j**2 = k**2 = -1 + (_i, _i, _1, -1), + (_j, _j, _1, -1), + (_k, _k, _1, -1), + # i * j = k; i * k = -j + (_i, _j, _k, 1), + (_i, _k, _j, -1), + # j * i = -k, j * k = i + (_j, _i, _k, -1), + (_j, _k, _i, 1), + # k * i = j; k * j = -i + (_k, _i, _j, 1), + (_k, _j, _i, -1), + ]: + self.multiplication_table[i, j, k] = v + +
+[docs]
+    def quaternion_multiplication_followed_by_inner_product(self, h, r, t):
+        """
+        :param h: shape: (`*batch_dims`, dim)
+            The head representations.
+        :param r: shape: (`*batch_dims`, dim)
+            The relation representations.
+        :param t: shape: (`*batch_dims`, dim)
+            The tail representations.
+        :return:
+            Triple scores.
+        """
+        n, d = h.shape
+        h = h.reshape(n, d // 4, 4)
+        r = r.reshape(n, d // 4, 4)
+        t = t.reshape(n, d // 4, 4)
+        return -torch.einsum("...di, ...dj, ...dk, ijk -> ...", h, r, t, self.multiplication_table)
+ + +
+[docs] + @staticmethod + def quaternion_normalizer(x: torch.FloatTensor) -> torch.FloatTensor: + r""" + Normalize the length of relation vectors, if the forward constraint has not been applied yet. + + Absolute value of a quaternion + + .. math:: + + |a + bi + cj + dk| = \sqrt{a^2 + b^2 + c^2 + d^2} + + L2 norm of quaternion vector: + + .. math:: + \|x\|^2 = \sum_{i=1}^d |x_i|^2 + = \sum_{i=1}^d (x_i.re^2 + x_i.im_1^2 + x_i.im_2^2 + x_i.im_3^2) + :param x: + The vector. + + :return: + The normalized vector. + """ + # Normalize relation embeddings + shape = x.shape + x = x.view(*shape[:-1], -1, 4) + x = torch.nn.functional.normalize(x, p=2, dim=-1) + return x.view(*shape)
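+# Editorial sketch (not part of dicee): the normaliser rescales every
+# 4-component block to unit quaternion length while preserving the shape:
+#
+#     >>> import torch
+#     >>> x = torch.randn(2, 8)  # two quaternions per row
+#     >>> y = QMult.quaternion_normalizer(x)
+#     >>> y.shape == x.shape
+#     True
+#     >>> torch.allclose(y.view(2, 2, 4).norm(dim=-1), torch.ones(2, 2))
+#     True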
+ + +
+[docs] + def forward_triples(self, indexed_triple: torch.Tensor) -> torch.Tensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(indexed_triple) + + # (1.1) If No normalization set, we need to apply quaternion normalization + if isinstance(self.normalize_relation_embeddings, IdentityClass): + rel_ent_emb = self.quaternion_normalizer(rel_ent_emb) + if self.explicit is False: + return self.quaternion_multiplication_followed_by_inner_product(head_ent_emb, rel_ent_emb, tail_ent_emb) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4) + emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4) + emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.hsplit(tail_ent_emb, 4) + # (2) + # (2.1) Apply quaternion multiplication on (1.1) and (2.1). + r_val, i_val, j_val, k_val = quaternion_mul(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k), + Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k)) + # (3) + # (3.1) Inner product + real_score = torch.sum(r_val * emb_tail_real, dim=1) + i_score = torch.sum(i_val * emb_tail_i, dim=1) + j_score = torch.sum(j_val * emb_tail_j, dim=1) + k_score = torch.sum(k_val * emb_tail_k, dim=1) + return real_score + i_score + j_score + k_score
+ + +
+[docs]
+    def forward_k_vs_all(self, x):
+        """
+        Given a head entity and a relation (h, r), compute scores for all possible triples, i.e.,
+        [score(h, r, x) | x \in Entities] => [0.0, 0.1, ..., 0.8], shape => (1, |Entities|)
+        Given a batch of head entities and relations => shape (batch size, |Entities|)
+        """
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (1.1) If no normalization is set, apply quaternion normalization.
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            rel_ent_emb = self.quaternion_normalizer(rel_ent_emb)
+        # (2) Split (1) into real and imaginary parts and apply quaternion multiplication.
+        emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4)
+        emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4)
+        r_val, i_val, j_val, k_val = quaternion_mul(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k),
+                                                    Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k))
+
+        emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.hsplit(self.entity_embeddings.weight, 4)
+        emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = emb_tail_real.transpose(1, 0), emb_tail_i.transpose(1, 0), \
+            emb_tail_j.transpose(1, 0), emb_tail_k.transpose(1, 0)
+
+        # (3) Inner product with all tail entity embeddings.
+        real_score = torch.mm(r_val, emb_tail_real)
+        i_score = torch.mm(i_val, emb_tail_i)
+        j_score = torch.mm(j_val, emb_tail_j)
+        k_score = torch.mm(k_val, emb_tail_k)
+
+        return real_score + i_score + j_score + k_score
+ + +
+[docs]
+    def forward_k_vs_sample(self, x, target_entity_idx):
+        """
+        Given a head entity and a relation (h, r), compute scores for a sampled subset of tail entities, i.e.,
+        [score(h, r, x) | x \in Sampled Entities] => shape (batch size, number of sampled entities)
+        """
+        # (1) Retrieve embeddings & Apply Dropout & Normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (1.1) If no normalization is set, apply quaternion normalization.
+        if isinstance(self.normalize_relation_embeddings, IdentityClass):
+            rel_ent_emb = self.quaternion_normalizer(rel_ent_emb)
+
+        # (2) Split (1) into real and imaginary parts and apply quaternion multiplication.
+        emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4)
+        emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4)
+        r_val, i_val, j_val, k_val = quaternion_mul(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k),
+                                                    Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k))
+
+        # (batch size, num. selected entities, dimension)
+        tail_entity_emb = self.entity_embeddings(target_entity_idx)
+        # Quaternion components of the selected tail entities.
+        emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.tensor_split(tail_entity_emb, 4, dim=2)
+
+        emb_tail_real = emb_tail_real.transpose(1, 2)
+        emb_tail_i = emb_tail_i.transpose(1, 2)
+        emb_tail_j = emb_tail_j.transpose(1, 2)
+        emb_tail_k = emb_tail_k.transpose(1, 2)
+
+        # (batch size, 1, dimension)
+        r_val = r_val.unsqueeze(1)
+        i_val = i_val.unsqueeze(1)
+        j_val = j_val.unsqueeze(1)
+        k_val = k_val.unsqueeze(1)
+
+        real_score = torch.bmm(r_val, emb_tail_real)
+        i_score = torch.bmm(i_val, emb_tail_i)
+        j_score = torch.bmm(j_val, emb_tail_j)
+        k_score = torch.bmm(k_val, emb_tail_k)
+
+        return (real_score + i_score + j_score + k_score).squeeze(1)
+
+ + + +
+[docs] +class ConvQ(BaseKGE): + """ Convolutional Quaternion Knowledge Graph Embeddings + + """ + + def __init__(self, args): + super().__init__(args) + self.name = 'ConvQ' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + # Convolution + self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_of_output_channels, + kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1, bias=True) + + self.fc_num_input = self.embedding_dim * 2 * self.num_of_output_channels # 8 because of 8 real values in 2 quaternions + self.fc1 = torch.nn.Linear(self.fc_num_input, self.embedding_dim) # Hard compression. + + self.bn_conv1 = torch.nn.BatchNorm2d(self.num_of_output_channels) + self.bn_conv2 = self.normalizer_class(self.embedding_dim) + self.feature_map_dropout = torch.nn.Dropout2d(self.feature_map_dropout_rate) + +
+[docs] + def residual_convolution(self, Q_1, Q_2): + emb_ent_real, emb_ent_imag_i, emb_ent_imag_j, emb_ent_imag_k = Q_1 + emb_rel_real, emb_rel_imag_i, emb_rel_imag_j, emb_rel_imag_k = Q_2 + x = torch.cat([emb_ent_real.view(-1, 1, 1, self.embedding_dim // 4), + emb_ent_imag_i.view(-1, 1, 1, self.embedding_dim // 4), + emb_ent_imag_j.view(-1, 1, 1, self.embedding_dim // 4), + emb_ent_imag_k.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_real.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_imag_i.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_imag_j.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_imag_k.view(-1, 1, 1, self.embedding_dim // 4)], 2) + + # n, c_in, h_in, w_in x.shape before conv. h_in=8, w_in embeddings + x = self.conv2d(x) + # n, c_out, h_out, w_out x.shape after conv. + x = self.bn_conv1(x) + x = torch.nn.functional.relu(x) + x = self.feature_map_dropout(x) + x = x.view(x.shape[0], -1) # reshape for NN. + x = torch.nn.functional.relu(self.bn_conv2(self.fc1(x))) + return torch.chunk(x, 4, dim=1)
+ + +
+[docs] + def forward_triples(self, indexed_triple: torch.Tensor) -> torch.Tensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(indexed_triple) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4) + emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4) + emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.hsplit(tail_ent_emb, 4) + + # (2) Apply convolution operation on (1.1) and (1.2). + Q_3 = self.residual_convolution(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k), + Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k)) + conv_real, conv_imag_i, conv_imag_j, conv_imag_k = Q_3 + # (3) + # (3.1) Apply quaternion multiplication on (1.1) and (3.1). + r_val, i_val, j_val, k_val = quaternion_mul( + Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k), + Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k)) + # (4) + # (4.1) Hadamard product of (2) with (3) and inner product with tails + real_score = torch.sum(conv_real * r_val * emb_tail_real, dim=1) + i_score = torch.sum(conv_imag_i * i_val * emb_tail_i, dim=1) + j_score = torch.sum(conv_imag_j * j_val * emb_tail_j, dim=1) + k_score = torch.sum(conv_imag_k * k_val * emb_tail_k, dim=1) + return real_score + i_score + j_score + k_score
+ + +
+[docs] + def forward_k_vs_all(self, x: torch.Tensor): + """ + Given a head entity and a relation (h,r), we compute scores for all entities. + [score(h,r,x)|x \in Entities] => [0.0,0.1,...,0.8], shape=> (1, |Entities|) + Given a batch of head entities and relations => shape (size of batch,| Entities|) + """ + + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x) + # (2) Split (1) into real and imaginary parts. + emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4) + emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4) + + # (2) Apply convolution operation on (1.1) and (1.2). + Q_3 = self.residual_convolution(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k), + Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k)) + conv_real, conv_imag_i, conv_imag_j, conv_imag_k = Q_3 + + # (3) + # (3.1) Apply quaternion multiplication. + r_val, i_val, j_val, k_val = quaternion_mul(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k), + Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k)) + # Prepare all entity embeddings. + emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.hsplit(self.entity_embeddings.weight, 4) + emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = emb_tail_real.transpose(1, 0), \ + emb_tail_i.transpose(1, 0), emb_tail_j.transpose( + 1, 0), emb_tail_k.transpose(1, 0) + + # (4) + # (4.1) Hadamard product of (2) with (3) and inner product with tails + real_score = torch.mm(conv_real * r_val, emb_tail_real) + i_score = torch.mm(conv_imag_i * i_val, emb_tail_i) + j_score = torch.mm(conv_imag_j * j_val, emb_tail_j) + k_score = torch.mm(conv_imag_k * k_val, emb_tail_k) + + return real_score + i_score + j_score + k_score
+
+ + + +
+[docs]
+class AConvQ(BaseKGE):
+    """ Additive Convolutional Quaternion Knowledge Graph Embeddings """
+
+    def __init__(self, args):
+        super().__init__(args)
+        self.name = 'AConvQ'
+        self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim)
+        self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim)
+        self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data)
+        # Convolution over the stacked quaternion components of head and relation.
+        self.conv2d = torch.nn.Conv2d(in_channels=1, out_channels=self.num_of_output_channels,
+                                      kernel_size=(self.kernel_size, self.kernel_size), stride=1, padding=1,
+                                      bias=True)
+        # The conv input stacks the 8 real-valued components of the two quaternions
+        # (4 per embedding), each of width embedding_dim // 4, hence
+        # 8 * (embedding_dim // 4) = 2 * embedding_dim features per output channel.
+        self.fc_num_input = self.embedding_dim * 2 * self.num_of_output_channels
+        self.fc1 = torch.nn.Linear(self.fc_num_input, self.embedding_dim)  # Hard compression.
+
+        self.bn_conv1 = torch.nn.BatchNorm2d(self.num_of_output_channels)
+        self.bn_conv2 = self.normalizer_class(self.embedding_dim)
+        self.feature_map_dropout = torch.nn.Dropout2d(self.feature_map_dropout_rate)
+[docs] + def residual_convolution(self, Q_1, Q_2): + emb_ent_real, emb_ent_imag_i, emb_ent_imag_j, emb_ent_imag_k = Q_1 + emb_rel_real, emb_rel_imag_i, emb_rel_imag_j, emb_rel_imag_k = Q_2 + x = torch.cat([emb_ent_real.view(-1, 1, 1, self.embedding_dim // 4), + emb_ent_imag_i.view(-1, 1, 1, self.embedding_dim // 4), + emb_ent_imag_j.view(-1, 1, 1, self.embedding_dim // 4), + emb_ent_imag_k.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_real.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_imag_i.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_imag_j.view(-1, 1, 1, self.embedding_dim // 4), + emb_rel_imag_k.view(-1, 1, 1, self.embedding_dim // 4)], 2) + + # n, c_in, h_in, w_in x.shape before conv. h_in=8, w_in embeddings + x = self.conv2d(x) + # n, c_out, h_out, w_out x.shape after conv. + x = self.bn_conv1(x) + x = torch.nn.functional.relu(x) + x = self.feature_map_dropout(x) + x = x.view(x.shape[0], -1) # reshape for NN. + x = torch.nn.functional.relu(self.bn_conv2(self.fc1(x))) + return torch.chunk(x, 4, dim=1)
+ + +
+[docs]
+    def forward_triples(self, indexed_triple: torch.Tensor) -> torch.Tensor:
+        # (1) Retrieve embeddings & apply dropout & normalization.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(indexed_triple)
+        # (2) Split (1) into real and imaginary parts.
+        emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4)
+        emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4)
+        emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.hsplit(tail_ent_emb, 4)
+        # (3) Apply the residual convolution on the head and relation quaternions.
+        Q_3 = self.residual_convolution(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k),
+                                        Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k))
+        conv_real, conv_imag_i, conv_imag_j, conv_imag_k = Q_3
+        # (4) Quaternion multiplication of the head and relation embeddings.
+        r_val, i_val, j_val, k_val = quaternion_mul(
+            Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k),
+            Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k))
+        # (5) Element-wise addition of (3) and (4), then the inner product with the tail
+        # embeddings; the parentheses keep this consistent with forward_k_vs_all below.
+        real_score = torch.sum((conv_real + r_val) * emb_tail_real, dim=1)
+        i_score = torch.sum((conv_imag_i + i_val) * emb_tail_i, dim=1)
+        j_score = torch.sum((conv_imag_j + j_val) * emb_tail_j, dim=1)
+        k_score = torch.sum((conv_imag_k + k_val) * emb_tail_k, dim=1)
+        return real_score + i_score + j_score + k_score
+ + +
+[docs]
+    def forward_k_vs_all(self, x: torch.Tensor):
+        """
+        Given a head entity and a relation (h, r), compute scores for all entities:
+        [score(h, r, e) for e in Entities], shape => (batch size, |Entities|).
+        """
+        # (1) Retrieve embeddings & apply dropout & normalization.
+        head_ent_emb, rel_ent_emb = self.get_head_relation_representation(x)
+        # (2) Split (1) into real and imaginary parts.
+        emb_head_real, emb_head_i, emb_head_j, emb_head_k = torch.hsplit(head_ent_emb, 4)
+        emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k = torch.hsplit(rel_ent_emb, 4)
+        # (3) Apply the residual convolution on the head and relation quaternions.
+        Q_3 = self.residual_convolution(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k),
+                                        Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k))
+        conv_real, conv_imag_i, conv_imag_j, conv_imag_k = Q_3
+        # (4) Quaternion multiplication of the head and relation embeddings.
+        r_val, i_val, j_val, k_val = quaternion_mul(Q_1=(emb_head_real, emb_head_i, emb_head_j, emb_head_k),
+                                                    Q_2=(emb_rel_real, emb_rel_i, emb_rel_j, emb_rel_k))
+        # (5) Prepare all entity embeddings, transposed for the matrix products below.
+        emb_tail_real, emb_tail_i, emb_tail_j, emb_tail_k = torch.hsplit(self.entity_embeddings.weight, 4)
+        emb_tail_real = emb_tail_real.transpose(1, 0)
+        emb_tail_i = emb_tail_i.transpose(1, 0)
+        emb_tail_j = emb_tail_j.transpose(1, 0)
+        emb_tail_k = emb_tail_k.transpose(1, 0)
+        # (6) Element-wise addition of (3) and (4) (the "additive" gating),
+        # followed by inner products with all tail embeddings.
+        real_score = torch.mm(conv_real + r_val, emb_tail_real)
+        i_score = torch.mm(conv_imag_i + i_val, emb_tail_i)
+        j_score = torch.mm(conv_imag_j + j_val, emb_tail_j)
+        k_score = torch.mm(conv_imag_k + k_val, emb_tail_k)
+        return real_score + i_score + j_score + k_score
+
+ +
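+# Illustration (sketch, not part of the model code): the only difference between the
+# additive model above and its multiplicative sibling is how the convolutional feature
+# map is combined with the quaternion product h∘r before the inner product with the tail:
+#   >>> conv, hr, t = torch.tensor([[1., 1.]]), torch.tensor([[2., 3.]]), torch.tensor([[1., 1.]])
+#   >>> torch.sum((conv + hr) * t, dim=1)  # additive gating       -> tensor([7.])
+#   >>> torch.sum(conv * hr * t, dim=1)    # multiplicative gating -> tensor([5.])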
\ No newline at end of file
diff --git a/_modules/dicee/models/real.html b/_modules/dicee/models/real.html
new file mode 100644
index 00000000..65d1862b
--- /dev/null
+++ b/_modules/dicee/models/real.html
@@ -0,0 +1,398 @@
+ dicee.models.real - DICE Embeddings 0.0.6 documentation
Source code for dicee.models.real

+from .base_model import BaseKGE
+from typing import Tuple
+import torch
+import numpy as np
+
+
+
+[docs] +class DistMult(BaseKGE): + """ + Embedding Entities and Relations for Learning and Inference in Knowledge Bases + https://arxiv.org/abs/1412.6575""" + + def __init__(self, args): + super().__init__(args) + self.name = 'DistMult' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + +
+[docs] + def forward_triples(self, x: torch.Tensor) -> torch.Tensor: + # (1) Retrieve embeddings & Apply Dropout & Normalization. + head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x) + # (2) Compute the score + return (self.hidden_dropout(self.hidden_normalizer(head_ent_emb * rel_ent_emb)) * tail_ent_emb).sum(dim=1)
+ + +
+[docs] + def forward_k_vs_all(self, x: torch.LongTensor): + emb_head_real, emb_rel_real = self.get_head_relation_representation(x) + return torch.mm(self.hidden_dropout(self.hidden_normalizer(emb_head_real * emb_rel_real)), + self.entity_embeddings.weight.transpose(1, 0))
+ + +
+[docs] + def forward_k_vs_sample(self, x: torch.LongTensor, target_entity_idx: torch.LongTensor): + emb_head_real, emb_rel_real = self.get_head_relation_representation(x) + hr = self.hidden_dropout(self.hidden_normalizer(emb_head_real * emb_rel_real)).unsqueeze(1) + t = self.entity_embeddings(target_entity_idx).transpose(1, 2) + return torch.bmm(hr, t).squeeze(1)
+ + +
+[docs]
+    def score(self, h, r, t):
+        return (self.hidden_dropout(self.hidden_normalizer(h * r)) * t).sum(dim=1)
+
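+# Example (sketch): the trilinear DistMult score <h, r, t> for toy vectors, ignoring the
+# dropout and normalization applied inside the model:
+#   >>> h, r, t = torch.tensor([[1., 2.]]), torch.tensor([[.5, .5]]), torch.tensor([[2., 2.]])
+#   >>> (h * r * t).sum(dim=1)  # 1*0.5*2 + 2*0.5*2
+#   tensor([3.])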
+ +
+[docs] +class TransE(BaseKGE): + """ + Translating Embeddings for Modeling + Multi-relational Data + https://proceedings.neurips.cc/paper/2013/file/1cecc7a77928ca8133fa24680a88d2f9-Paper.pdf""" + + def __init__(self, args): + super().__init__(args) + self.name = 'TransE' + self._norm = 2 + self.margin = 4 + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + +
+[docs]
+    def forward_triples(self, x: torch.Tensor) -> torch.FloatTensor:
+        # (1) Retrieve embeddings & apply dropout & normalization.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) TransE distance d := ||h + r - t||_2 is close to 0 for a true triple.
+        # The returned score is margin - d (here margin = 4): a perfect triple scores 4,
+        # and sigmoid(margin - d) can then be read as a probability downstream.
+        distance = torch.nn.functional.pairwise_distance(head_ent_emb + rel_ent_emb, tail_ent_emb,
+                                                         p=self._norm)
+        return self.margin - distance
+ + +
+[docs] + def forward_k_vs_all(self, x: torch.Tensor) -> torch.FloatTensor: + emb_head_real, emb_rel_real = self.get_head_relation_representation(x) + distance = torch.nn.functional.pairwise_distance(torch.unsqueeze(emb_head_real + emb_rel_real, 1), + self.entity_embeddings.weight, p=self._norm) + return self.margin - distance
+
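+# Example (sketch): a perfectly translated triple (h + r == t) has distance ~0 and thus
+# scores close to the margin, while worse translations score lower:
+#   >>> h, r, t = torch.tensor([[1., 0.]]), torch.tensor([[0., 1.]]), torch.tensor([[1., 1.]])
+#   >>> 4 - torch.nn.functional.pairwise_distance(h + r, t, p=2)  # ~ tensor([4.])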
+ + + +
+[docs] +class Shallom(BaseKGE): + """ A shallow neural model for relation prediction (https://arxiv.org/abs/2101.09090) """ + + def __init__(self, args): + super().__init__(args) + self.name = 'Shallom' + # Fixed + shallom_width = int(2 * self.embedding_dim) + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data) + self.shallom = torch.nn.Sequential(torch.nn.Dropout(self.input_dropout_rate), + torch.nn.Linear(self.embedding_dim * 2, shallom_width), + self.normalizer_class(shallom_width), + torch.nn.ReLU(), + torch.nn.Dropout(self.hidden_dropout_rate), + torch.nn.Linear(shallom_width, self.num_relations)) + +
+[docs] + def get_embeddings(self) -> Tuple[np.ndarray, None]: + return self.entity_embeddings.weight.data.detach(), None
+ + +
+[docs] + def forward_k_vs_all(self, x) -> torch.FloatTensor: + e1_idx: torch.Tensor + e2_idx: torch.Tensor + e1_idx, e2_idx = x[:, 0], x[:, 1] + emb_s, emb_o = self.entity_embeddings(e1_idx), self.entity_embeddings(e2_idx) + return self.shallom(torch.cat((emb_s, emb_o), 1))
+ + +
+[docs]
+    def forward_triples(self, x) -> torch.FloatTensor:
+        """Score the relation of each (head, relation, tail) triple via relation prediction."""
+        n, d = x.shape
+        assert d == 3
+        # Scores over all relations for each (head, tail) pair, shape (n, num_relations).
+        scores_for_all_relations = self.forward_k_vs_all(x[:, [0, 2]])
+        # Select, per row, the score of the relation given in the triple.
+        return scores_for_all_relations.gather(1, x[:, 1].unsqueeze(1)).flatten()
+
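+# Usage sketch: Shallom performs relation prediction, so forward_k_vs_all consumes a batch
+# of (head, tail) index pairs, e.g. x = torch.LongTensor([[0, 1]]), and returns a
+# (batch size, num_relations) score matrix.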
+ + + +
+[docs] +class Pyke(BaseKGE): + """ A Physical Embedding Model for Knowledge Graphs """ + + def __init__(self, args): + super().__init__(args) + self.name = 'Pyke' + self.entity_embeddings = torch.nn.Embedding(self.num_entities, self.embedding_dim) + self.relation_embeddings = torch.nn.Embedding(self.num_relations, self.embedding_dim) + self.param_init(self.entity_embeddings.weight.data), self.param_init(self.relation_embeddings.weight.data) + self.dist_func = torch.nn.PairwiseDistance(p=2) + self.margin = 1.0 + +
+[docs]
+    def forward_triples(self, x: torch.LongTensor):
+        # (1) Retrieve embeddings for a batch of entities and relations.
+        head_ent_emb, rel_ent_emb, tail_ent_emb = self.get_triple_representation(x)
+        # (2) Compute the Euclidean distances head-to-relation and relation-to-tail,
+        # and score the triple as margin minus their average.
+        dist_head_rel = self.dist_func(head_ent_emb, rel_ent_emb)
+        dist_rel_tail = self.dist_func(rel_ent_emb, tail_ent_emb)
+        avg_dist = (dist_head_rel + dist_rel_tail) / 2
+        return self.margin - avg_dist
+
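+# Example (sketch): with h = [0, 0], r = [1, 0], t = [1, 1], both pairwise distances equal
+# 1, so the score is margin - 1 = 0.0; triples whose embeddings lie closer together score higher.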
\ No newline at end of file
diff --git a/_modules/dicee/models/static_funcs.html b/_modules/dicee/models/static_funcs.html
new file mode 100644
index 00000000..137ed78e
--- /dev/null
+++ b/_modules/dicee/models/static_funcs.html
@@ -0,0 +1,247 @@
+ dicee.models.static_funcs - DICE Embeddings 0.0.6 documentation

Source code for dicee.models.static_funcs

+from typing import Tuple
+import torch
+
+
+[docs]
+def quaternion_mul(*, Q_1, Q_2) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+    """
+    Perform quaternion multiplication (the Hamilton product).
+
+    :param Q_1: 4-tuple of tensors (a, b, c, d) holding the real and i, j, k components.
+    :param Q_2: 4-tuple of tensors (a, b, c, d) holding the real and i, j, k components.
+    :return: the four components of Q_1 * Q_2.
+    """
+    a_h, b_h, c_h, d_h = Q_1
+    a_r, b_r, c_r, d_r = Q_2
+    r_val = a_h * a_r - b_h * b_r - c_h * c_r - d_h * d_r
+    i_val = a_h * b_r + b_h * a_r + c_h * d_r - d_h * c_r
+    j_val = a_h * c_r - b_h * d_r + c_h * a_r + d_h * b_r
+    k_val = a_h * d_r + b_h * c_r - c_h * b_r + d_h * a_r
+    return r_val, i_val, j_val, k_val
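+# Example (sketch): a quick sanity check with the quaternion units i and j, whose
+# Hamilton product is k, i.e. (0,1,0,0) x (0,0,1,0) = (0,0,0,1):
+#   >>> one, zero = torch.ones(1), torch.zeros(1)
+#   >>> quaternion_mul(Q_1=(zero, one, zero, zero), Q_2=(zero, zero, one, zero))
+#   (tensor([0.]), tensor([0.]), tensor([0.]), tensor([1.]))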
+ +
\ No newline at end of file
diff --git a/_modules/dicee/query_generator.html b/_modules/dicee/query_generator.html
new file mode 100644
index 00000000..130bd498
--- /dev/null
+++ b/_modules/dicee/query_generator.html
@@ -0,0 +1,797 @@
+ dicee.query_generator - DICE Embeddings 0.0.6 documentation

Source code for dicee.query_generator

+from collections import defaultdict
+from typing import Union, Dict, List, Tuple
+
+import numpy as np
+import random
+import os
+import pickle
+from copy import deepcopy
+from .static_funcs import save_pickle, load_pickle
+
+
+
+[docs]
+class QueryGenerator:
+    def __init__(self, train_path, val_path: str, test_path: str, ent2id, rel2id, seed: int,
+                 gen_valid: bool = False,
+                 gen_test: bool = True):
+        self.train_path = train_path
+        self.val_path = val_path
+        self.test_path = test_path
+        self.gen_valid = gen_valid
+        self.gen_test = gen_test
+
+        self.seed = seed
+
+        self.max_ans_num = 1e6
+
+        # Split being grounded; set to 'test' (or 'valid'/'train') before queries are generated.
+        self.mode: str = ""
+        self.ent2id: Dict = ent2id
+        self.rel2id: Dict = rel2id
+        self.ent_in: Dict = {}
+        self.ent_out: Dict = {}
+        self.query_name_to_struct = {"1p": ['e', ['r']],
+                                     "2p": ['e', ['r', 'r']],
+                                     "3p": ['e', ['r', 'r', 'r']],
+                                     "2i": [['e', ['r']], ['e', ['r']]],
+                                     "3i": [['e', ['r']], ['e', ['r']], ['e', ['r']]],
+                                     "pi": [['e', ['r', 'r']], ['e', ['r']]],
+                                     "ip": [[['e', ['r']], ['e', ['r']]], ['r']],
+                                     # negation
+                                     "2in": [['e', ['r']], ['e', ['r', 'n']]],
+                                     "3in": [['e', ['r']], ['e', ['r']], ['e', ['r', 'n']]],
+                                     "pin": [['e', ['r', 'r']], ['e', ['r', 'n']]],
+                                     "pni": [['e', ['r', 'r', 'n']], ['e', ['r']]],
+                                     "inp": [[['e', ['r']], ['e', ['r', 'n']]], ['r']],
+                                     # union
+                                     "2u": [['e', ['r']], ['e', ['r']], ['u']],
+                                     "up": [[['e', ['r']], ['e', ['r']], ['u']], ['r']]}
+        self.set_global_seed(seed)
+[docs]
+    def list2tuple(self, list_data):
+        """Convert a nested list to a nested tuple (inverse of tuple2list)."""
+        return tuple(self.list2tuple(x) if isinstance(x, list) else x for x in list_data)
+ + +
+[docs] + def tuple2list(self, x: Union[List, Tuple]) -> Union[List, Tuple]: + """ + Convert a nested tuple to a nested list. + """ + if isinstance(x, tuple): + return [self.tuple2list(item) if isinstance(item, tuple) else item for item in x] + else: + return x
+ + +
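+# Example (sketch, with qg a QueryGenerator instance): the two helpers are inverses on
+# nested query templates:
+#   >>> qg.list2tuple(['e', ['r', 'r']])
+#   ('e', ('r', 'r'))
+#   >>> qg.tuple2list(('e', ('r', 'r')))
+#   ['e', ['r', 'r']]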
+[docs] + def set_global_seed(self, seed: int): + """Set seed""" + np.random.seed(seed) + random.seed(seed)
+ + +
+[docs] + def construct_graph(self, paths: List[str]) -> Tuple[Dict, Dict]: + """ + Construct graph from triples + Returns dicts with incoming and outgoing edges + """ + # Mapping from tail entity and a relation to heads. + tail_relation_to_heads = defaultdict(lambda: defaultdict(set)) + # Mapping from head and relation to tails. + head_relation_to_tails = defaultdict(lambda: defaultdict(set)) + + for path in paths: + with open(path, "r") as f: + for line in f: + h, r, t = map(str, line.strip().split("\t")) + tail_relation_to_heads[self.ent2id[t]][self.rel2id[r]].add(self.ent2id[h]) + head_relation_to_tails[self.ent2id[h]][self.rel2id[r]].add(self.ent2id[t]) + + self.ent_in = tail_relation_to_heads + self.ent_out = head_relation_to_tails + + return tail_relation_to_heads, head_relation_to_tails
+ + +
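+# Example (sketch): for a single tab-separated triple "A\tlikes\tB" with
+# ent2id = {'A': 0, 'B': 1} and rel2id = {'likes': 0}, construct_graph yields
+#   head_relation_to_tails == {0: {0: {1}}}   # A --likes--> {B}
+#   tail_relation_to_heads == {1: {0: {0}}}   # B <--likes-- {A}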
+[docs]
+    def fill_query(self, query_structure: List[Union[str, List]],
+                   ent_in: Dict, ent_out: Dict,
+                   answer: int) -> bool:
+        """
+        Ground a query template in place, walking backwards from a sampled answer entity.
+        Returns True if the template could not be grounded (a "broken" query).
+        """
+        assert isinstance(query_structure[-1], list)
+        all_relation_flag = True
+        for ele in query_structure[-1]:
+            if ele not in ['r', 'n']:
+                all_relation_flag = False
+                break
+        if all_relation_flag:
+            r = -1
+            for i in range(len(query_structure[-1]))[::-1]:
+                if query_structure[-1][i] == 'n':
+                    query_structure[-1][i] = -2
+                    continue
+                found = False
+                for j in range(40):
+                    if len(ent_in[answer].keys()) < 1:
+                        # Not enough incoming relations: report a broken query.
+                        return True
+                    r_tmp = random.choice(list(ent_in[answer].keys()))
+                    # Reciprocal relations come in pairs (2k, 2k + 1); avoid sampling the
+                    # inverse of the previously chosen relation.
+                    if r_tmp // 2 != r // 2 or r_tmp == r:
+                        r = r_tmp
+                        found = True
+                        break
+                if not found:
+                    return True
+                query_structure[-1][i] = r
+                answer = random.sample(list(ent_in[answer][r]), 1)[0]
+            if query_structure[0] == 'e':
+                query_structure[0] = answer
+            else:
+                return self.fill_query(query_structure[0], ent_in, ent_out, answer)
+        else:
+            same_structure = defaultdict(list)
+            for i in range(len(query_structure)):
+                same_structure[self.list2tuple(query_structure[i])].append(i)
+            for i in range(len(query_structure)):
+                if len(query_structure[i]) == 1 and query_structure[i][0] == 'u':
+                    assert i == len(query_structure) - 1
+                    query_structure[i][0] = -1
+                    continue
+                broken_flag = self.fill_query(query_structure[i], ent_in, ent_out, answer)
+                if broken_flag:
+                    return True
+            # Reject queries whose identically-structured branches were grounded identically.
+            for structure in same_structure:
+                if len(same_structure[structure]) != 1:
+                    structure_set = set()
+                    for i in same_structure[structure]:
+                        structure_set.add(self.list2tuple(query_structure[i]))
+                    if len(structure_set) < len(same_structure[structure]):
+                        return True
+ + +
+[docs]
+    def achieve_answer(self, query: List[Union[str, List]],
+                       ent_in: Dict, ent_out: Dict) -> set:
+        """
+        Compute the answer set of a grounded query by traversing the graph.
+        In a grounded query, entities and relations are integer ids;
+        -1 marks a union branch and -2 marks a negated relation.
+        """
+        assert isinstance(query[-1], list)
+        # query[-1] is a relation chain iff it contains only relation ids (or -2 for negation).
+        all_relation_flag = True
+        for ele in query[-1]:
+            if not isinstance(ele, int) or (ele == -1):
+                all_relation_flag = False
+                break
+        if all_relation_flag:
+            if isinstance(query[0], int):
+                # Anchor entity.
+                ent_set = set([query[0]])
+            else:
+                ent_set = self.achieve_answer(query[0], ent_in, ent_out)
+            for i in range(len(query[-1])):
+                if query[-1][i] == -2:
+                    # Negation: complement w.r.t. all entities (approximated by len(ent_in)).
+                    ent_set = set(range(len(ent_in))) - ent_set
+                else:
+                    # Projection: follow the relation from every entity in the current set.
+                    ent_set_traverse = set()
+                    for ent in ent_set:
+                        ent_set_traverse = ent_set_traverse.union(ent_out[ent][query[-1][i]])
+                    ent_set = ent_set_traverse
+        else:
+            # Intersection (default) or union (marked by a trailing [-1]) of sub-queries.
+            ent_set = self.achieve_answer(query[0], ent_in, ent_out)
+            union_flag = False
+            if len(query[-1]) == 1 and query[-1][0] == -1:
+                union_flag = True
+            for i in range(1, len(query)):
+                if not union_flag:
+                    ent_set = ent_set.intersection(self.achieve_answer(query[i], ent_in, ent_out))
+                else:
+                    if i == len(query) - 1:
+                        continue
+                    ent_set = ent_set.union(self.achieve_answer(query[i], ent_in, ent_out))
+        return ent_set
+ + + + + +
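+# Example (sketch): answering the grounded 2p query [0, [0, 1]] is two projection hops
+# through head_relation_to_tails. With triples (0, 0, 1) and (1, 1, 2):
+#   hop 1: ent_out[0][0] == {1};  hop 2: ent_out[1][1] == {2}  ->  answer set {2}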
+[docs]
+    def ground_queries(self, query_structure: List[Union[str, List]],
+                       ent_in: Dict, ent_out: Dict, small_ent_in: Dict, small_ent_out: Dict,
+                       gen_num: int, query_name: str):
+        """Generate grounded queries and compute their answer sets."""
+        (num_sampled, num_try, num_repeat, num_more_answer, num_broken, num_no_extra_answer,
+         num_no_extra_negative, num_empty) = 0, 0, 0, 0, 0, 0, 0, 0
+        tp_ans_num, fp_ans_num, fn_ans_num = [], [], []
+        queries = defaultdict(set)
+        tp_answers = defaultdict(set)
+        fp_answers = defaultdict(set)
+        fn_answers = defaultdict(set)
+
+        # Rejection-sampling loop, bounded by 100_000 attempts; fewer than gen_num
+        # queries may be returned for hard-to-ground structures.
+        while num_sampled < gen_num:
+            if num_try == 100_000:
+                break
+            num_try += 1
+            # fill_query mutates the template in place, hence the deep copy.
+            query = deepcopy(query_structure)
+            answer = random.sample(list(ent_in.keys()), 1)[0]
+            broken_flag = self.fill_query(query, ent_in, ent_out, answer)
+
+            if broken_flag:
+                num_broken += 1
+                continue
+
+            answer_set = self.achieve_answer(query, ent_in, ent_out)
+            small_answer_set = self.achieve_answer(query, small_ent_in, small_ent_out)
+
+            if len(answer_set) == 0:
+                num_empty += 1
+                continue
+
+            if len(answer_set - small_answer_set) == 0:
+                num_no_extra_answer += 1
+                continue
+
+            if 'n' in query_name:
+                if len(small_answer_set - answer_set) == 0:
+                    num_no_extra_negative += 1
+                    continue
+
+            if max(len(answer_set - small_answer_set), len(small_answer_set - answer_set)) > self.max_ans_num:
+                num_more_answer += 1
+                continue
+
+            if self.list2tuple(query) in queries[self.list2tuple(query_structure)]:
+                num_repeat += 1
+                continue
+
+            queries[self.list2tuple(query_structure)].add(self.list2tuple(query))
+            tp_answers[self.list2tuple(query)] = small_answer_set
+            fp_answers[self.list2tuple(query)] = small_answer_set - answer_set
+            fn_answers[self.list2tuple(query)] = answer_set - small_answer_set
+
+            num_sampled += 1
+            tp_ans_num.append(len(tp_answers[self.list2tuple(query)]))
+            fp_ans_num.append(len(fp_answers[self.list2tuple(query)]))
+            fn_ans_num.append(len(fn_answers[self.list2tuple(query)]))
+
+        return queries, tp_answers, fp_answers, fn_answers
+ + +
+[docs] + def unmap(self, query_type, queries, tp_answers, fp_answers, fn_answers): + + # Create id2ent dictionary + id2ent = {v: k for k, v in self.ent2id.items()} + id2rel = {v: k for k, v in self.rel2id.items()} + + # Unmap queries and create a mapping from ID-based queries to text-based queries + unmapped_queries_dict = defaultdict(set) + query_id_to_text = {} + for query_structure_tuple, query_set in queries.items(): + for query in query_set: + unmapped_query = self.unmap_query(query_structure_tuple, query, id2ent, id2rel) + unmapped_queries_dict[query_structure_tuple].add(unmapped_query) + query_id_to_text[query] = unmapped_query + + easy_answers = defaultdict(set) + false_positives = defaultdict(set) + hard_answers = defaultdict(set) + for query, answer_set in tp_answers.items(): + unmapped_answer_set = {id2ent[answer] for answer in answer_set} + easy_answers[query_id_to_text[query]] = unmapped_answer_set + + # Unmap fp_answers and update to false_positives + for query, answer_set in fp_answers.items(): + unmapped_answer_set = {id2ent[answer] for answer in answer_set} + false_positives[query_id_to_text[query]] = unmapped_answer_set + + # Unmap fn_answers and update to hard_answers + for query, answer_set in fn_answers.items(): + unmapped_answer_set = {id2ent[answer] for answer in answer_set} + hard_answers[query_id_to_text[query]] = unmapped_answer_set + + return unmapped_queries_dict, easy_answers, false_positives, hard_answers
+ + +
+[docs]
+    def unmap_query(self, query_structure, query, id2ent, id2rel):
+        """
+        Map an id-based grounded query back to its text form, guided by the structure
+        template: 'e' marks an entity id, 'r' a relation id, 'n' a negation marker and
+        'u' a union marker. This handles all supported structures
+        (1p/2p/3p/2i/3i/pi/ip/2in/3in/pin/pni/inp/2u/up) uniformly.
+        """
+        if query_structure == "e":
+            return id2ent[query]
+        if query_structure == "r":
+            return id2rel[query]
+        if query_structure == "n":
+            return "not"
+        if query_structure == "u":
+            return "union"
+        return tuple(self.unmap_query(sub_structure, sub_query, id2ent, id2rel)
+                     for sub_structure, sub_query in zip(query_structure, query))
+ + +
+[docs]
+    def generate_queries(self, query_struct, gen_num: int, query_type: str):
+        """
+        Build the incoming/outgoing edge maps for the relevant splits (train, valid or test),
+        pass them to ground_queries, and return the queries together with their answers.
+        """
+        train_tail_relation_to_heads, train_head_relation_to_tails = self.construct_graph(paths=[self.train_path])
+        val_tail_relation_to_heads, val_head_relation_to_tails = self.construct_graph(
+            paths=[self.train_path, self.val_path])
+        # @TODO: the two *_only graphs below are currently unused, and valid_only_* is
+        # built from the valid AND test paths despite its name; revisit.
+        valid_only_ent_in, valid_only_ent_out = self.construct_graph(paths=[self.val_path, self.test_path])
+
+        test_tail_relation_to_heads, test_head_relation_to_tails = self.construct_graph(
+            paths=[self.train_path, self.val_path, self.test_path])
+        test_only_ent_in, test_only_ent_out = self.construct_graph(paths=[self.test_path])
+        self.mode = 'test'
+        test_queries, test_tp_answers, test_fp_answers, test_fn_answers = self.ground_queries(
+            query_struct, test_tail_relation_to_heads, test_head_relation_to_tails, val_tail_relation_to_heads,
+            val_head_relation_to_tails, gen_num, query_type)
+        # @TODO: test_queries is keyed by tuples, e.g. ('e', ('r',)), while query structures
+        # are defined as lists, e.g. ['e', ['r']]; fix this inconsistency.
+        print(
+            f"General structure is {query_struct} with name {query_type}. Number of queries generated: {len(test_tp_answers)}")
+        return test_queries, test_tp_answers, test_fp_answers, test_fn_answers
+ + +
+[docs]
+    def save_queries(self, query_type: str, gen_num: int, save_path: str):
+        """
+        Generate queries of a given type and serialize them, together with their
+        easy/false-positive/hard answers, as pickles under save_path.
+        """
+        # Resolve the query structure for query_type.
+        try:
+            query_struct = self.query_name_to_struct[query_type]
+        except KeyError:
+            print(f"Invalid query_type: {query_type}")
+            return []
+        queries, tp_answers, fp_answers, fn_answers = self.generate_queries(query_struct, gen_num, query_type)
+        unmapped_queries, easy_answers, false_positives, hard_answers = self.unmap(query_type, queries, tp_answers,
+                                                                                   fp_answers, fn_answers)
+
+        # Save the unmapped queries and answers.
+        name_to_save = f'{self.mode}-{query_type}'
+        if not os.path.isdir(save_path):
+            os.makedirs(save_path)
+        with open(f'{save_path}/{name_to_save}-queries.pkl', 'wb') as f:
+            pickle.dump(unmapped_queries, f)
+        with open(f'{save_path}/{name_to_save}-easy-answers.pkl', 'wb') as f:
+            pickle.dump(easy_answers, f)
+        with open(f'{save_path}/{name_to_save}-false-positives.pkl', 'wb') as f:
+            pickle.dump(false_positives, f)
+        with open(f'{save_path}/{name_to_save}-hard-answers.pkl', 'wb') as f:
+            pickle.dump(hard_answers, f)
+ + +
+[docs] + def load_queries(self, path): + raise NotImplementedError()
+ + +
+[docs] + def get_queries(self, query_type: str, gen_num: int): + + queries, tp_answers, fp_answers, fn_answers = self.generate_queries(self.query_name_to_struct[query_type], + gen_num, query_type) + unmapped_queries, easy_answers, false_positives, hard_answers = self.unmap(query_type, queries, tp_answers, + fp_answers, fn_answers) + return unmapped_queries, easy_answers, false_positives, hard_answers
+ + +
+[docs] + @staticmethod + def save_queries_and_answers(path: str, data: List[Tuple[str, Tuple[defaultdict]]]) -> None: + """ Save Queries into Disk""" + save_pickle(file_path=path, data=data)
+ + +
+[docs] + @staticmethod + def load_queries_and_answers(path: str) -> List[Tuple[str, Tuple[defaultdict]]]: + """ Load Queries from Disk to Memory""" + print("Loading...") + data = load_pickle(file_path=path) + assert isinstance(data, list) + assert isinstance(data[0], tuple) + assert isinstance(data[0][0], str) + assert isinstance(data[0][1], tuple) + return data
+
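+# Usage sketch (hypothetical dataset paths; ent2id/rel2id are the usual str -> int dicts):
+#   >>> qg = QueryGenerator(train_path="KGs/UMLS/train.txt", val_path="KGs/UMLS/valid.txt",
+#   ...                     test_path="KGs/UMLS/test.txt", ent2id=ent2id, rel2id=rel2id, seed=1)
+#   >>> queries, easy_answers, false_positives, hard_answers = qg.get_queries("2p", gen_num=10)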
\ No newline at end of file
diff --git a/_modules/dicee/read_preprocess_save_load_kg/preprocess.html b/_modules/dicee/read_preprocess_save_load_kg/preprocess.html
new file mode 100644
index 00000000..5d1ec4eb
--- /dev/null
+++ b/_modules/dicee/read_preprocess_save_load_kg/preprocess.html
@@ -0,0 +1,492 @@
+ dicee.read_preprocess_save_load_kg.preprocess - DICE Embeddings 0.0.6 documentation

Source code for dicee.read_preprocess_save_load_kg.preprocess

+import pandas as pd
+import polars as pl
+from .util import create_recipriocal_triples, timeit, index_triples_with_pandas, dataset_sanity_checking
+from dicee.static_funcs import numpy_data_type_changer
+
+
+
+[docs] +class PreprocessKG: + """ Preprocess the data in memory """ + + def __init__(self, kg): + self.kg = kg + +
+[docs] + def start(self) -> None: + """ + Preprocess train, valid and test datasets stored in knowledge graph instance + + Parameter + --------- + + Returns + ------- + None + """ + if self.kg.backend == "polars": + self.preprocess_with_polars() + elif self.kg.backend in ["pandas", "rdflib"]: + self.preprocess_with_pandas() + else: + raise KeyError(f'{self.kg.backend} not found') + print('Finding suitable integer type for the index...') + self.kg.train_set = numpy_data_type_changer(self.kg.train_set, + num=max(self.kg.num_entities, self.kg.num_relations)) + if self.kg.valid_set is not None: + self.kg.valid_set = numpy_data_type_changer(self.kg.valid_set, + num=max(self.kg.num_entities, self.kg.num_relations)) + if self.kg.test_set is not None: + self.kg.test_set = numpy_data_type_changer(self.kg.test_set, + num=max(self.kg.num_entities, self.kg.num_relations))
+ + +
+[docs]
+    @timeit
+    def preprocess_with_pandas(self) -> None:
+        """
+        Preprocess the train, valid and test datasets stored in the knowledge graph instance with pandas:
+
+        (1) Add reciprocal or noisy triples.
+        (2) Construct the vocabulary.
+        (3) Index the datasets.
+
+        Returns
+        -------
+        None
+        """
+        # (1) Add reciprocal or noisy triples.
+        self.apply_reciprical_or_noise()
+        # (2) Construct integer indexing for entities and relations.
+        self.sequential_vocabulary_construction()
+        self.kg.num_entities, self.kg.num_relations = len(self.kg.entity_to_idx), len(self.kg.relation_to_idx)
+
+        # (3) Index the datasets and convert them to numpy arrays.
+        self.kg.train_set = index_triples_with_pandas(self.kg.train_set,
+                                                      self.kg.entity_to_idx,
+                                                      self.kg.relation_to_idx)
+        assert isinstance(self.kg.train_set, pd.core.frame.DataFrame)
+        self.kg.train_set = self.kg.train_set.values
+        self.kg.train_set = numpy_data_type_changer(self.kg.train_set,
+                                                    num=max(self.kg.num_entities, self.kg.num_relations))
+        dataset_sanity_checking(self.kg.train_set, self.kg.num_entities, self.kg.num_relations)
+        if self.kg.valid_set is not None:
+            self.kg.valid_set = index_triples_with_pandas(self.kg.valid_set, self.kg.entity_to_idx,
+                                                          self.kg.relation_to_idx)
+            self.kg.valid_set = self.kg.valid_set.values
+            dataset_sanity_checking(self.kg.valid_set, self.kg.num_entities, self.kg.num_relations)
+            self.kg.valid_set = numpy_data_type_changer(self.kg.valid_set,
+                                                        num=max(self.kg.num_entities, self.kg.num_relations))
+
+        if self.kg.test_set is not None:
+            self.kg.test_set = index_triples_with_pandas(self.kg.test_set, self.kg.entity_to_idx,
+                                                         self.kg.relation_to_idx)
+            self.kg.test_set = self.kg.test_set.values
+            dataset_sanity_checking(self.kg.test_set, self.kg.num_entities, self.kg.num_relations)
+            self.kg.test_set = numpy_data_type_changer(self.kg.test_set,
+                                                       num=max(self.kg.num_entities, self.kg.num_relations))
+ +
+[docs] + @timeit + def preprocess_with_polars(self) -> None: + print(f'*** Preprocessing Train Data:{self.kg.train_set.shape} with Polars ***') + # (1) Add reciprocal triples, e.g. KG:= {(s,p,o)} union {(o,p_inverse,s)} + if self.kg.add_reciprical and self.kg.eval_model: + def adding_reciprocal_triples(): + """ Add reciprocal triples """ + # (1.1) Add reciprocal triples into training set + self.kg.train_set.extend(self.kg.train_set.select([ + pl.col("object").alias('subject'), + pl.col("relation").apply(lambda x: x + '_inverse'), + pl.col("subject").alias('object') + ])) + if self.kg.valid_set is not None: + # (1.2) Add reciprocal triples into valid_set set. + self.kg.valid_set.extend(self.kg.valid_set.select([ + pl.col("object").alias('subject'), + pl.col("relation").apply(lambda x: x + '_inverse'), + pl.col("subject").alias('object') + ])) + if self.kg.test_set is not None: + # (1.2) Add reciprocal triples into test set. + self.kg.test_set.extend(self.kg.test_set.select([ + pl.col("object").alias('subject'), + pl.col("relation").apply(lambda x: x + '_inverse'), + pl.col("subject").alias('object') + ])) + + print('Adding Reciprocal Triples...') + adding_reciprocal_triples() + + # (2) Type checking + try: + assert isinstance(self.kg.train_set, pl.DataFrame) + except TypeError: + raise TypeError(f"{type(self.kg.train_set)}") + assert isinstance(self.kg.valid_set, pl.DataFrame) or self.kg.valid_set is None + assert isinstance(self.kg.test_set, pl.DataFrame) or self.kg.test_set is None + + def concat_splits(train, val, test): + x = [train] + if val is not None: + x.append(val) + if test is not None: + x.append(test) + return pl.concat(x) + + print('Concat Splits...') + df_str_kg = concat_splits(self.kg.train_set, self.kg.valid_set, self.kg.test_set) + + print('Entity Indexing...') + self.kg.entity_to_idx = pl.concat((df_str_kg['subject'], + df_str_kg['object'])).unique(maintain_order=True).rename('entity') + print('Relation Indexing...') + self.kg.relation_to_idx =df_str_kg['relation'].unique(maintain_order=True) + print('Creating index for entities...') + self.kg.entity_to_idx = {ent: idx for idx, ent in enumerate(self.kg.entity_to_idx.to_list())} + print('Creating index for relations...') + self.kg.relation_to_idx = {rel: idx for idx, rel in enumerate(self.kg.relation_to_idx.to_list())} + self.kg.num_entities, self.kg.num_relations = len(self.kg.entity_to_idx), len(self.kg.relation_to_idx) + + print(f'Indexing Training Data {self.kg.train_set.shape}...') + self.kg.train_set = self.kg.train_set.with_columns( + pl.col("subject").map_dict(self.kg.entity_to_idx).alias("subject"), + pl.col("relation").map_dict(self.kg.relation_to_idx).alias("relation"), + pl.col("object").map_dict(self.kg.entity_to_idx).alias("object")).to_numpy() + if self.kg.valid_set is not None: + print(f'Indexing Val Data {self.kg.valid_set.shape}...') + self.kg.valid_set = self.kg.valid_set.with_columns( + pl.col("subject").map_dict(self.kg.entity_to_idx).alias("subject"), + pl.col("relation").map_dict(self.kg.relation_to_idx).alias("relation"), + pl.col("object").map_dict(self.kg.entity_to_idx).alias("object")).to_numpy() + if self.kg.test_set is not None: + print(f'Indexing Test Data {self.kg.test_set.shape}...') + self.kg.test_set = self.kg.test_set.with_columns( + pl.col("subject").map_dict(self.kg.entity_to_idx).alias("subject"), + pl.col("relation").map_dict(self.kg.relation_to_idx).alias("relation"), + pl.col("object").map_dict(self.kg.entity_to_idx).alias("object")).to_numpy() + print(f'*** Preprocessing Train 
Data:{self.kg.train_set.shape} with Polars DONE ***')
+ + +
+[docs]
+    def sequential_vocabulary_construction(self) -> None:
+        """
+        (1) Optionally drop infrequent triples from the training set.
+        (2) Concatenate the train, valid and test splits in memory.
+        (3) Create bijective mappings (dicts) from entities and relations to integer indexes.
+        """
+        if not isinstance(self.kg.train_set, pd.DataFrame):
+            raise TypeError(f'train_set must be a pandas DataFrame, got {type(self.kg.train_set)}')
+        assert isinstance(self.kg.valid_set, pd.DataFrame) or self.kg.valid_set is None
+        assert isinstance(self.kg.test_set, pd.DataFrame) or self.kg.test_set is None
+
+        # (1) Optionally remove infrequent triples from the training set.
+        self.remove_triples_from_train_with_condition()
+        # (2) Concatenate dataframes.
+        print('Concatenating data to obtain index...')
+        x = [self.kg.train_set]
+        if self.kg.valid_set is not None:
+            x.append(self.kg.valid_set)
+        if self.kg.test_set is not None:
+            x.append(self.kg.test_set)
+        df_str_kg = pd.concat(x, ignore_index=True)
+        del x
+        print('Creating a mapping from entities to integer indexes...')
+        # (3) Create a bijection from entities to integer indexes.
+        # ravel('K') returns a contiguous flattened array in memory order.
+        ordered_list = pd.unique(df_str_kg[['subject', 'object']].values.ravel('K')).tolist()
+        self.kg.entity_to_idx = {k: i for i, k in enumerate(ordered_list)}
+        # (4) Create a bijection from relations to integer indexes.
+        ordered_list = pd.unique(df_str_kg['relation'].values.ravel('K')).tolist()
+        self.kg.relation_to_idx = {k: i for i, k in enumerate(ordered_list)}
+        del ordered_list
+ + +
+[docs]
+    def remove_triples_from_train_with_condition(self):
+        if self.kg.min_freq_for_vocab is not None:
+            assert isinstance(self.kg.min_freq_for_vocab, int)
+            assert self.kg.min_freq_for_vocab > 0
+            print(
+                f'Dropping triples having entities or relations occurring at most {self.kg.min_freq_for_vocab} times...',
+                end=' ')
+            num_triples = self.kg.train_set.size
+            print('Total num triples:', num_triples, end=' ')
+            # Entity frequency: index is a URI, value is its number of occurrences.
+            entity_frequency = pd.concat([self.kg.train_set['subject'], self.kg.train_set['object']]).value_counts()
+            relation_frequency = self.kg.train_set['relation'].value_counts()
+
+            low_frequency_entities = entity_frequency[
+                entity_frequency <= self.kg.min_freq_for_vocab].index.values
+            low_frequency_relation = relation_frequency[
+                relation_frequency <= self.kg.min_freq_for_vocab].index.values
+            # Drop triples whose subject, object or relation is infrequent.
+            self.kg.train_set = self.kg.train_set[~self.kg.train_set['subject'].isin(low_frequency_entities)]
+            self.kg.train_set = self.kg.train_set[~self.kg.train_set['object'].isin(low_frequency_entities)]
+            self.kg.train_set = self.kg.train_set[~self.kg.train_set['relation'].isin(low_frequency_relation)]
+            print('\t after dropping:', self.kg.train_set.size)
+            del low_frequency_entities
+ +
+[docs]
+    def apply_reciprical_or_noise(self) -> None:
+        """ Add reciprocal triples to the train, valid and test splits when enabled.
+        Noisy triples are added at read time (see ReadFromDisk). """
+        # Reciprocal closure: KG := {(s,p,o)} union {(o,p_inverse,s)}
+        if self.kg.add_reciprical and self.kg.eval_model:
+            print('Adding reciprocal triples '
+                  'to train, validation, and test sets, e.g. KG := {(s,p,o)} union {(o,p_inverse,s)}')
+            self.kg.train_set = create_recipriocal_triples(self.kg.train_set)
+            if self.kg.valid_set is not None:
+                self.kg.valid_set = create_recipriocal_triples(self.kg.valid_set)
+            if self.kg.test_set is not None:
+                self.kg.test_set = create_recipriocal_triples(self.kg.test_set)
+
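+# Example (sketch): assuming create_recipriocal_triples appends one (o, p_inverse, s) row
+# per (s, p, o), the triple (Berlin, locatedIn, Germany) additionally yields
+# (Germany, locatedIn_inverse, Berlin).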
\ No newline at end of file
diff --git a/_modules/dicee/read_preprocess_save_load_kg/read_from_disk.html b/_modules/dicee/read_preprocess_save_load_kg/read_from_disk.html
new file mode 100644
index 00000000..350f464e
--- /dev/null
+++ b/_modules/dicee/read_preprocess_save_load_kg/read_from_disk.html
@@ -0,0 +1,309 @@
+ dicee.read_preprocess_save_load_kg.read_from_disk - DICE Embeddings 0.0.6 documentation

Source code for dicee.read_preprocess_save_load_kg.read_from_disk

+from .util import read_from_disk, read_from_triple_store
+import glob
+import pandas as pd
+import numpy as np
+
+
+
+[docs] +class ReadFromDisk: + """Read the data from disk into memory""" + + def __init__(self, kg): + self.kg = kg + +
+[docs]
+    def start(self) -> None:
+        """
+        Read a knowledge graph from disk into memory.
+
+        Data will be available via the train_set, valid_set and test_set attributes.
+
+        Returns
+        -------
+        None
+        """
+        if self.kg.path_single_kg is not None:
+            self.kg.train_set = read_from_disk(self.kg.path_single_kg,
+                                               self.kg.read_only_few,
+                                               self.kg.sample_triples_ratio,
+                                               backend=self.kg.backend)
+            if self.kg.add_noise_rate:
+                self.add_noisy_triples_into_training()
+
+            self.kg.valid_set = None
+            self.kg.test_set = None
+        elif self.kg.sparql_endpoint is not None:
+            self.kg.train_set = read_from_triple_store(endpoint=self.kg.sparql_endpoint)
+            self.kg.valid_set = None
+            self.kg.test_set = None
+        elif self.kg.dataset_dir:
+            for i in glob.glob(self.kg.dataset_dir + '/*'):
+                if 'train' in i:
+                    self.kg.train_set = read_from_disk(i, self.kg.read_only_few, self.kg.sample_triples_ratio,
+                                                       backend=self.kg.backend)
+                    if self.kg.add_noise_rate:
+                        self.add_noisy_triples_into_training()
+                elif 'test' in i and self.kg.eval_model is not None:
+                    self.kg.test_set = read_from_disk(i, backend=self.kg.backend)
+                elif 'valid' in i and self.kg.eval_model is not None:
+                    self.kg.valid_set = read_from_disk(i, backend=self.kg.backend)
+                else:
+                    print(f'Unrecognized data {i}')
+        else:
+            raise RuntimeError(
+                f"Invalid data: {self.kg.dataset_dir}\t{self.kg.sparql_endpoint}\t{self.kg.path_single_kg}")
+ + +
+[docs] + def add_noisy_triples_into_training(self): + num_noisy_triples = int(len(self.kg.train_set) * self.kg.add_noise_rate) + s = len(self.kg.train_set) + # @TODO: Can we use polars here ? + list_of_entities = pd.unique(self.kg.train_set[['subject', 'object']].values.ravel('K')) + self.kg.train_set = pd.concat([self.kg.train_set, + # Noisy triples + pd.DataFrame( + {'subject': np.random.choice(list_of_entities, num_noisy_triples), + 'relation': np.random.choice( + pd.unique(self.kg.train_set[['relation']].values.ravel('K')), + num_noisy_triples), + 'object': np.random.choice(list_of_entities, num_noisy_triples)} + ) + ], ignore_index=True) + + assert s + num_noisy_triples == len(self.kg.train_set)
+
\ No newline at end of file
diff --git a/_modules/dicee/read_preprocess_save_load_kg/save_load_disk.html b/_modules/dicee/read_preprocess_save_load_kg/save_load_disk.html
new file mode 100644
index 00000000..87eef0ce
--- /dev/null
+++ b/_modules/dicee/read_preprocess_save_load_kg/save_load_disk.html
@@ -0,0 +1,307 @@
+ dicee.read_preprocess_save_load_kg.save_load_disk - DICE Embeddings 0.0.6 documentation

Source code for dicee.read_preprocess_save_load_kg.save_load_disk

+import numpy as np
+import concurrent
+from .util import load_pickle, get_er_vocab, get_re_vocab, get_ee_vocab, create_constraints, load_numpy_ndarray
+import os
+from dicee.static_funcs import save_pickle, save_numpy_ndarray
+
+
+
+[docs] +class LoadSaveToDisk: + def __init__(self, kg): + self.kg = kg + +
+[docs]
+    def save(self):
+        assert self.kg.path_for_deserialization is None
+
+        if self.kg.path_for_serialization is None:
+            # No serialization requested.
+            return None
+
+        assert isinstance(self.kg.entity_to_idx, dict)
+        assert isinstance(self.kg.relation_to_idx, dict)
+        assert isinstance(self.kg.train_set, np.ndarray)
+
+        # (1) Save the dictionary mappings to disk.
+        save_pickle(data=self.kg.entity_to_idx, file_path=self.kg.path_for_serialization + '/entity_to_idx.p')
+        save_pickle(data=self.kg.relation_to_idx, file_path=self.kg.path_for_serialization + '/relation_to_idx.p')
+
+        # (2) Save the indexed splits.
+        save_numpy_ndarray(data=self.kg.train_set, file_path=self.kg.path_for_serialization + '/train_set.npy')
+        if self.kg.valid_set is not None:
+            save_numpy_ndarray(data=self.kg.valid_set, file_path=self.kg.path_for_serialization + '/valid_set.npy')
+        if self.kg.test_set is not None:
+            save_numpy_ndarray(data=self.kg.test_set, file_path=self.kg.path_for_serialization + '/test_set.npy')
+
+        if self.kg.eval_model:
+            if self.kg.valid_set is not None and self.kg.test_set is not None:
+                assert isinstance(self.kg.valid_set, np.ndarray) and isinstance(self.kg.test_set, np.ndarray)
+                data = np.concatenate([self.kg.train_set, self.kg.valid_set, self.kg.test_set])
+            else:
+                data = self.kg.train_set
+            # (3) Compute the four evaluation vocabularies in parallel; note that the
+            # attributes below hold futures until .result() is called on them.
+            print('Submit er-vocab, re-vocab, and ee-vocab via ProcessPoolExecutor...')
+            executor = concurrent.futures.ProcessPoolExecutor()
+            self.kg.er_vocab = executor.submit(get_er_vocab, data, self.kg.path_for_serialization + '/er_vocab.p')
+            self.kg.re_vocab = executor.submit(get_re_vocab, data, self.kg.path_for_serialization + '/re_vocab.p')
+            self.kg.ee_vocab = executor.submit(get_ee_vocab, data, self.kg.path_for_serialization + '/ee_vocab.p')
+            self.kg.constraints = executor.submit(create_constraints, self.kg.train_set,
+                                                  self.kg.path_for_serialization + '/constraints.p')
+            self.kg.domain_constraints_per_rel, self.kg.range_constraints_per_rel = None, None
+ + +
+[docs] + def load(self): + assert self.kg.path_for_deserialization is not None + assert self.kg.path_for_serialization == self.kg.path_for_deserialization + + self.kg.entity_to_idx = load_pickle(file_path=self.kg.path_for_deserialization + '/entity_to_idx.p') + self.kg.relation_to_idx = load_pickle(file_path=self.kg.path_for_deserialization + '/relation_to_idx.p') + assert isinstance(self.kg.entity_to_idx, dict) + assert isinstance(self.kg.relation_to_idx, dict) + self.kg.num_entities = len(self.kg.entity_to_idx) + self.kg.num_relations = len(self.kg.relation_to_idx) + + self.kg.train_set = load_numpy_ndarray(file_path=self.kg.path_for_deserialization + '/train_set.npy') + + if os.path.isfile(self.kg.path_for_deserialization + '/valid_set.npy'): + self.kg.valid_set = load_numpy_ndarray(file_path=self.kg.path_for_deserialization + '/valid_set.npy') + if os.path.isfile(self.kg.path_for_deserialization + '/test_set.npy'): + self.kg.test_set = load_numpy_ndarray(file_path=self.kg.path_for_deserialization + '/test_set.npy') + + if self.kg.eval_model: + self.kg.er_vocab = load_pickle(file_path=self.kg.path_for_deserialization + '/er_vocab.p') + self.kg.re_vocab = load_pickle(file_path=self.kg.path_for_deserialization + '/re_vocab.p') + self.kg.ee_vocab = load_pickle(file_path=self.kg.path_for_deserialization + '/ee_vocab.p') + self.kg.domain_constraints_per_rel, self.kg.range_constraints_per_rel = load_pickle( + file_path=self.kg.path_for_deserialization + '/constraints.p')
+
\ No newline at end of file
diff --git a/_modules/dicee/read_preprocess_save_load_kg/util.html b/_modules/dicee/read_preprocess_save_load_kg/util.html
new file mode 100644
index 00000000..86db447a
--- /dev/null
+++ b/_modules/dicee/read_preprocess_save_load_kg/util.html
@@ -0,0 +1,627 @@
+ dicee.read_preprocess_save_load_kg.util - DICE Embeddings 0.0.6 documentation

Source code for dicee.read_preprocess_save_load_kg.util

+from collections import defaultdict
+import numpy as np
+import polars
+import glob
+import time
+import functools
+import pandas as pd
+import pickle
+import os
+import psutil
+import requests
+from rdflib import Graph
+
+
+
+[docs] +def timeit(func): + @functools.wraps(func) + def timeit_wrapper(*args, **kwargs): + start_time = time.perf_counter() + result = func(*args, **kwargs) + end_time = time.perf_counter() + total_time = end_time - start_time + print( + f'{func.__name__} took {total_time:.4f} seconds ' + f'| Current Memory Usage {psutil.Process(os.getpid()).memory_info().rss / 1000000: .5} in MB') + return result + + return timeit_wrapper
+ + + +
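+# Usage sketch (hypothetical function): any callable decorated with @timeit reports its
+# wall-clock time and the process' resident memory once it returns:
+#   >>> @timeit
+#   ... def busy(n):
+#   ...     return sum(range(n))
+#   >>> busy(10_000)  # prints e.g. "busy took 0.0001 seconds | Current Memory Usage ..."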
+[docs] +@timeit +def read_with_polars(data_path, read_only_few: int = None, sample_triples_ratio: float = None) -> polars.DataFrame: + """ Load and Preprocess via Polars """ + print(f'*** Reading {data_path} with Polars ***') + # (1) Load the data. + if data_path[-3:] in ['txt', 'csv']: + print('Reading with polars.read_csv with sep **t** ...') + df = polars.read_csv(data_path, + has_header=False, + low_memory=False, + n_rows=None if read_only_few is None else read_only_few, + columns=[0, 1, 2], + dtypes=[polars.Utf8], # str + new_columns=['subject', 'relation', 'object'], + separator="\t") # \s+ doesn't work for polars + else: + if read_only_few is None: + df = polars.read_parquet(data_path, use_pyarrow=True) + else: + df = polars.read_parquet(data_path, n_rows=read_only_few) + # (2) Sample from (1). + if sample_triples_ratio: + print(f'Subsampling {sample_triples_ratio} of input data {df.shape}...') + df = df.sample(frac=sample_triples_ratio) + print(df.shape) + + # (3) Type heuristic prediction: If KG is an RDF KG, remove all triples where subject is not <?>. + h = df.head().to_pandas() + if sum(h["subject"].str.startswith('<')) + sum(h["relation"].str.startswith('<')) > 2: + print('Removing triples with literal values...') + df = df.filter(polars.col("object").str.starts_with('<')) + return df
+ + + +
+[docs]
+@timeit
+def read_with_pandas(data_path, read_only_few: int = None, sample_triples_ratio: float = None):
+    print(f'*** Reading {data_path} with Pandas ***')
+    if data_path[-3:] in ["ttl", 'txt', 'csv', 'zst']:
+        print('Reading with pandas.read_csv with sep ** s+ ** ...')
+        df = pd.read_csv(data_path,
+                         sep=r"\s+",
+                         header=None,
+                         nrows=None if read_only_few is None else read_only_few,
+                         usecols=[0, 1, 2],
+                         names=['subject', 'relation', 'object'],
+                         dtype=str)
+    else:
+        df = pd.read_parquet(data_path, engine='pyarrow')
+    # (2) Read only the first read_only_few triples if requested.
+    if isinstance(read_only_few, int):
+        if read_only_few > 0:
+            print(f'Reading only few input data {read_only_few}...')
+            df = df.head(read_only_few)
+            print('Done !\n')
+    # (3) Optionally subsample the triples.
+    if sample_triples_ratio:
+        print(f'Subsampling {sample_triples_ratio} of input data...')
+        df = df.sample(frac=sample_triples_ratio)
+        print('Done !\n')
+    # (4) Type heuristic: if the KG looks like RDF, drop triples whose object is a literal,
+    # i.e. does not start with '<' (with na treated as False instead of NaN).
+    if sum(df.head()["subject"].str.startswith('<')) + sum(df.head()["relation"].str.startswith('<')) > 2:
+        print('Removing triples with literal values...')
+        df = df[df["object"].str.startswith('<', na=False)]
+        print('Done !\n')
+    return df
+ + + +
+[docs]
+def read_from_disk(data_path: str, read_only_few: int = None,
+                   sample_triples_ratio: float = None, backend=None):
+    assert backend
+    # If the path exists:
+    if glob.glob(data_path):
+        # Infer the data format from the file extension.
+        dformat = data_path[data_path.find(".") + 1:]
+        if dformat in ["ttl", "owl", "turtle", "rdf/xml"] and backend != "rdflib":
+            raise RuntimeError(
+                f"Data with **{dformat}** format cannot be read via --backend pandas or polars. Use --backend rdflib")
+
+        if backend == 'pandas':
+            return read_with_pandas(data_path, read_only_few, sample_triples_ratio)
+        elif backend == 'polars':
+            return read_with_polars(data_path, read_only_few, sample_triples_ratio)
+        elif backend == "rdflib":
+            if dformat not in ["ttl", "owl", "nt", "turtle", "rdf/xml", "n3", "n-triples"]:
+                raise AssertionError(f"--backend {backend} and data format **{dformat}** do not match. "
+                                     f"Use --backend pandas")
+            return pd.DataFrame(data=[(str(s), str(p), str(o)) for s, p, o in Graph().parse(data_path)],
+                                columns=['subject', 'relation', 'object'], dtype=str)
+        else:
+            raise RuntimeError(f'--backend {backend} and {data_path} do not match')
+    else:
+        print(f'{data_path} could not be found!')
+        return None
+ + + +
+[docs] +def read_from_triple_store(endpoint: str = None): + """ Read triples from triple store into pandas dataframe """ + assert endpoint is not None + assert isinstance(endpoint, str) + query = """SELECT ?subject ?predicate ?object WHERE { ?subject ?predicate ?object}""" + response = requests.post(endpoint, data={'query': query}) + assert response.ok + # Generator + triples = ([triple['subject']['value'], triple['predicate']['value'], triple['object']['value']] for triple in + response.json()['results']['bindings']) + return pd.DataFrame(data=triples, index=None, columns=["subject", "relation", "object"], dtype=str)
+ + + +
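+# Usage sketch (hypothetical endpoint URL): fetch all triples from a SPARQL endpoint into
+# a DataFrame with columns [subject, relation, object]:
+#   >>> df = read_from_triple_store(endpoint="http://localhost:3030/ds/query")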
+[docs]
+def get_er_vocab(data, file_path: str = None):
+    # Map each (head entity, relation) pair to the list of its tail entities.
+    er_vocab = defaultdict(list)
+    for triple in data:
+        er_vocab[(triple[0], triple[1])].append(triple[2])
+    if file_path:
+        save_pickle(data=er_vocab, file_path=file_path)
+    return er_vocab
+ + + +
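+# Example (sketch): both tails of the shared (head, relation) pair are grouped together:
+#   >>> get_er_vocab([[0, 0, 1], [0, 0, 2]])
+#   defaultdict(<class 'list'>, {(0, 0): [1, 2]})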
+[docs]
+def get_re_vocab(data, file_path: str = None):
+    # Map each (relation, tail entity) pair to the list of its head entities.
+    re_vocab = defaultdict(list)
+    for triple in data:
+        re_vocab[(triple[1], triple[2])].append(triple[0])
+    if file_path:
+        save_pickle(data=re_vocab, file_path=file_path)
+    return re_vocab
+ + + +
+[docs]
+def get_ee_vocab(data, file_path: str = None):
+    # head and tail entities
+    ee_vocab = defaultdict(list)
+    for triple in data:
+        ee_vocab[(triple[0], triple[2])].append(triple[1])
+    if file_path:
+        save_pickle(data=ee_vocab, file_path=file_path)
+    return ee_vocab
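A small worked example on toy index triples clarifies the three vocabularies: er_vocab maps (head, relation) to tails, re_vocab maps (relation, tail) to heads, and ee_vocab maps (head, tail) to relations.

toy = [(0, 0, 1), (0, 0, 2), (2, 0, 1)]
print(get_er_vocab(toy)[(0, 0)])  # [1, 2] -> tails of (head=0, relation=0)
print(get_re_vocab(toy)[(0, 1)])  # [0, 2] -> heads of (relation=0, tail=1)
print(get_ee_vocab(toy)[(0, 1)])  # [0]    -> relations linking head=0 and tail=1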
+ + + +
+[docs]
+def create_constraints(triples, file_path: str = None):
+    """
+    (1) Extract domains and ranges of relations.
+    (2) Store a mapping from relations to entities that are outside of the domain and range,
+    i.e., create constrained entities based on the domain and range of relations.
+    :param triples:
+    :return:
+    Tuple[dict, dict]
+    """
+    assert isinstance(triples, np.ndarray)
+    assert triples.shape[1] == 3
+
+    # (1) Compute the range and domain of each relation
+    range_constraints_per_rel = dict()
+    domain_constraints_per_rel = dict()
+    set_of_entities = set()
+    set_of_relations = set()
+    for (e1, p, e2) in triples:
+        range_constraints_per_rel.setdefault(p, set()).add(e2)
+        domain_constraints_per_rel.setdefault(p, set()).add(e1)
+        set_of_entities.add(e1)
+        set_of_relations.add(p)
+        set_of_entities.add(e2)
+
+    # (2) For each relation, keep the entities that never occur in its domain/range.
+    for rel in set_of_relations:
+        range_constraints_per_rel[rel] = list(set_of_entities - range_constraints_per_rel[rel])
+        domain_constraints_per_rel[rel] = list(set_of_entities - domain_constraints_per_rel[rel])
+
+    if file_path:
+        save_pickle(data=(domain_constraints_per_rel, range_constraints_per_rel), file_path=file_path)
+    return domain_constraints_per_rel, range_constraints_per_rel
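A toy example, assuming integer-indexed triples: an entity ends up in the domain (range) constraint list of a relation exactly when it is never observed as its head (tail).

toy = np.array([[0, 0, 1], [2, 0, 1], [1, 1, 2]])
domain_c, range_c = create_constraints(toy)
print(sorted(domain_c[0]))  # [1]    -> entity 1 never occurs as a head of relation 0
print(sorted(range_c[0]))   # [0, 2] -> entities 0 and 2 never occur as a tail of relation 0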
+ + + +
+[docs]
+@timeit
+def load_with_pandas(self) -> None:
+    """ Deserialize data """
+    print(f'Deserialization Path: {self.kg.deserialize_flag}\n')
+    start_time = time.time()
+    print('[1 / 7] Deserializing compressed entity integer mapping...')
+    self.kg.entity_to_idx = pd.read_parquet(self.kg.deserialize_flag + '/entity_to_idx.gzip')
+    print(f'Done !\t{time.time() - start_time:.3f} seconds\n')
+    self.kg.num_entities = len(self.kg.entity_to_idx)
+
+    print('[2 / 7] Deserializing compressed relation integer mapping...')
+    start_time = time.time()
+    self.kg.relation_to_idx = pd.read_parquet(self.kg.deserialize_flag + '/relation_to_idx.gzip')
+    print(f'Done !\t{time.time() - start_time:.3f} seconds\n')
+
+    self.kg.num_relations = len(self.kg.relation_to_idx)
+    print(
+        '[3 / 7] Converting entity and relation mappings '
+        'from pandas dataframes to dictionaries for easy access...',
+    )
+    start_time = time.time()
+    self.kg.entity_to_idx = self.kg.entity_to_idx.to_dict()['entity']
+    self.kg.relation_to_idx = self.kg.relation_to_idx.to_dict()['relation']
+    print(f'Done !\t{time.time() - start_time:.3f} seconds\n')
+    print('[4 / 7] Deserializing integer mapped training data and mapping it to numpy ndarray...')
+    start_time = time.time()
+    self.kg.train_set = pd.read_parquet(self.kg.deserialize_flag + '/idx_train_df.gzip').values
+    print(f'Done !\t{time.time() - start_time:.3f} seconds\n')
+    try:
+        print('[5 / 7] Deserializing integer mapped validation data and mapping it to numpy ndarray...')
+        self.kg.valid_set = pd.read_parquet(self.kg.deserialize_flag + '/idx_valid_df.gzip').values
+        print('Done!\n')
+    except FileNotFoundError:
+        print('No valid data found!\n')
+        self.kg.valid_set = None
+
+    try:
+        print('[6 / 7] Deserializing integer mapped test data and mapping it to numpy ndarray...')
+        self.kg.test_set = pd.read_parquet(self.kg.deserialize_flag + '/idx_test_df.gzip').values
+        print('Done!\n')
+    except FileNotFoundError:
+        print('No test data found!\n')
+        self.kg.test_set = None
+
+    if self.kg.eval_model:
+        if self.kg.valid_set is not None and self.kg.test_set is not None:
+            # Concatenate all splits to build the filtering vocabularies.
+            data = np.concatenate([self.kg.train_set, self.kg.valid_set, self.kg.test_set])
+        else:
+            data = self.kg.train_set
+        print('[7 / 7] Creating er, re, and ee type vocabularies for evaluation...')
+        start_time = time.time()
+        self.kg.er_vocab = get_er_vocab(data)
+        self.kg.re_vocab = get_re_vocab(data)
+        # Mapping from subject-object pairs to relations.
+        self.kg.ee_vocab = get_ee_vocab(data)
+        self.kg.domain_constraints_per_rel, self.kg.range_constraints_per_rel = create_constraints(
+            self.kg.train_set)
+        print(f'Done !\t{time.time() - start_time:.3f} seconds\n')
+ + + +
+[docs] +def save_numpy_ndarray(*, data: np.ndarray, file_path: str): + n, d = data.shape + assert n > 0 + assert d == 3 + with open(file_path, 'wb') as f: + np.save(f, data)
+ + + +
+[docs] +def load_numpy_ndarray(*, file_path: str): + with open(file_path, 'rb') as f: + return np.load(f)
+ + + +
+[docs]
+def save_pickle(*, data: object, file_path: str):
+    with open(file_path, "wb") as f:
+        pickle.dump(data, f)
+ + + +
+[docs]
+def load_pickle(*, file_path: str):
+    with open(file_path, 'rb') as f:
+        return pickle.load(f)
+ + + +
+[docs]
+def create_recipriocal_triples(x):
+    """
+    Add inverse triples into a pandas dataframe, i.e., for each (s, p, o) add (o, p_inverse, s).
+    :param x: pandas dataframe with columns ['subject', 'relation', 'object']
+    :return: pandas dataframe including the inverse triples
+    """
+    return pd.concat([x, x['object'].to_frame(name='subject').join(
+        x['relation'].map(lambda x: x + '_inverse').to_frame(name='relation')).join(
+        x['subject'].to_frame(name='object'))], ignore_index=True)
+ + + +
+[docs]
+def index_triples_with_pandas(train_set, entity_to_idx: dict, relation_to_idx: dict) -> pd.core.frame.DataFrame:
+    """
+    :param train_set: pandas dataframe
+    :param entity_to_idx: a mapping from str to integer index
+    :param relation_to_idx: a mapping from str to integer index
+    :return: indexed triples, i.e., pandas dataframe
+    """
+    n, d = train_set.shape
+    train_set['subject'] = train_set['subject'].apply(lambda x: entity_to_idx.get(x))
+    train_set['relation'] = train_set['relation'].apply(lambda x: relation_to_idx.get(x))
+    train_set['object'] = train_set['object'].apply(lambda x: entity_to_idx.get(x))
+    # train_set = train_set.dropna(inplace=True)
+    if isinstance(train_set, pd.core.frame.DataFrame):
+        # Indexing must not change the shape of the data.
+        assert (n, d) == train_set.shape
+    else:
+        raise TypeError('Wrong type of training data')
+    return train_set
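A short sketch of index_triples_with_pandas with a hypothetical two-triple dataframe:

train = pd.DataFrame({'subject': ['a', 'b'], 'relation': ['r', 'r'], 'object': ['b', 'a']})
e2i, r2i = {'a': 0, 'b': 1}, {'r': 0}
print(index_triples_with_pandas(train, e2i, r2i).values.tolist())  # [[0, 0, 1], [1, 0, 0]]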
+ + + +
+[docs]
+def dataset_sanity_checking(train_set: np.ndarray, num_entities: int, num_relations: int) -> None:
+    """
+    Check that the indexed training data is consistent with the entity and relation counts.
+    :param train_set:
+    :param num_entities:
+    :param num_relations:
+    :return:
+    """
+    assert isinstance(train_set, np.ndarray)
+    n, d = train_set.shape
+    assert d == 3
+    try:
+        assert n > 0
+    except AssertionError:
+        raise AssertionError('Size of the training dataset must be greater than 0.')
+
+    try:
+        assert num_entities >= max(train_set[:, 0]) and num_entities >= max(train_set[:, 2])
+    except AssertionError:
+        raise AssertionError(
+            f'Entity Indexing Error:\n'
+            f'Max ID of a subject or object entity in train set:'
+            f'{max(train_set[:, 0])} or {max(train_set[:, 2])} is greater than num_entities:{num_entities}')
+    try:
+        assert num_relations >= max(train_set[:, 1])
+    except AssertionError:
+        raise AssertionError(
+            f'Relation Indexing Error:\n'
+            f'Max ID of a relation in train set:{max(train_set[:, 1])} is greater than num_relations:{num_relations}')
+    # Sanity checking: data types
+    assert isinstance(train_set[0], np.ndarray)
\ No newline at end of file diff --git a/_modules/dicee/run.html b/_modules/dicee/run.html new file mode 100644 index 00000000..0cc428f6 --- /dev/null +++ b/_modules/dicee/run.html @@ -0,0 +1,336 @@
Source code for dicee.run

+import json
+from .executer import Execute
+import pytorch_lightning as pl
+import argparse
+
+
+
+[docs]
+def get_default_arguments(description=None):
+    """ Extends pytorch_lightning Trainer's arguments with ours """
+    parser = pl.Trainer.add_argparse_args(argparse.ArgumentParser(add_help=False))
+    # Default Trainer params: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#methods
+    # Data related arguments
+    parser.add_argument("--path_dataset_folder", type=str, default=None,
+                        help="The path of a folder containing train.txt, and/or valid.txt and/or test.txt"
+                             ", e.g., KGs/UMLS")
+    parser.add_argument("--sparql_endpoint", type=str, default=None,
+                        help="An endpoint of a triple store, e.g. 'http://localhost:3030/mutagenesis/'. ")
+    parser.add_argument("--path_single_kg", type=str, default=None,
+                        help="Path of a file corresponding to the input knowledge graph")
+    parser.add_argument("--path_to_store_single_run", type=str, default=None,
+                        help="A single directory to be created that contains all data related to the embeddings.")
+    parser.add_argument("--storage_path", type=str, default='Experiments',
+                        help="A directory named with the time of execution is created under --storage_path "
+                             "to store all data related to the embeddings.")
+    parser.add_argument("--save_embeddings_as_csv", action="store_true",
+                        help="A flag for saving embeddings in a csv file.")
+    # Model related arguments
+    parser.add_argument("--model", type=str,
+                        default="Keci",
+                        choices=["ConEx", "AConEx", "ConvQ", "AConvQ", "ConvO", "AConvO", "QMult",
+                                 "OMult", "Shallom", "DistMult", "TransE", "ComplEx", "Keci",
+                                 "Pykeen_MuRE", "Pykeen_QuatE", "Pykeen_DistMult", "Pykeen_BoxE", "Pykeen_CP",
+                                 "Pykeen_HolE", "Pykeen_ProjE", "Pykeen_RotatE",
+                                 "Pykeen_TransE", "Pykeen_TransF", "Pykeen_TransH",
+                                 "Pykeen_TransR", "Pykeen_TuckER", "Pykeen_ComplEx"],
+                        help="Available knowledge graph embedding models. "
+                             "To use another model available in PyKEEN, prefix its class name with "
+                             "**Pykeen_** (e.g., **Pykeen_BoxE**) and add it to the choices.")
+    parser.add_argument('--optim', type=str, default='Adam',
+                        help='An optimizer',
+                        choices=['Adam', 'SGD'])
+    parser.add_argument('--embedding_dim', type=int, default=32,
+                        help='Number of dimensions for an embedding vector. ')
+    parser.add_argument("--num_epochs", type=int, default=50, help='Number of epochs for training. ')
+    parser.add_argument('--batch_size', type=int, default=1024,
+                        help='Mini-batch size. If None, an automatic batch size finder is applied.')
+    parser.add_argument("--lr", type=float, default=0.1)
+    parser.add_argument('--callbacks', type=json.loads,
+                        default={},
+                        help='e.g. \'{"PPE": {"last_percent_to_consider": 10}, '
+                             '"Perturb": {"level": "out", "ratio": 0.2, "method": "RN", "scaler": 0.3}}\'')
+    parser.add_argument("--backend", type=str, default='pandas',
+                        choices=["pandas", "polars", "rdflib"],
+                        help='Backend for loading, preprocessing, and indexing the input knowledge graph.')
+    parser.add_argument("--trainer", type=str, default='PL',
+                        choices=['torchCPUTrainer', 'PL', 'torchDDP'],
+                        help='PL (pytorch lightning trainer), torchDDP (custom ddp), torchCPUTrainer (custom cpu only)')
+    parser.add_argument('--scoring_technique', default="AllvsAll",
+                        help="Training technique for knowledge graph embedding model",
+                        choices=["AllvsAll", "KvsAll", "1vsAll", "NegSample", "KvsSample"])
+    parser.add_argument('--neg_ratio', type=int, default=0,
+                        help='The number of negative triples generated per positive triple.')
+    parser.add_argument('--weight_decay', type=float, default=0.0, help='L2 penalty, e.g. 0.00001')
+    parser.add_argument('--input_dropout_rate', type=float, default=0.0)
+    parser.add_argument('--hidden_dropout_rate', type=float, default=0.0)
+    parser.add_argument("--feature_map_dropout_rate", type=float, default=0.0)
+    parser.add_argument("--normalization", type=str, default="None",
+                        choices=["LayerNorm", "BatchNorm1d", "None"],
+                        help="Normalization technique")
+    parser.add_argument("--init_param", type=str, default=None, choices=["xavier_normal", None],
+                        help="Initialization technique")
+    parser.add_argument("--gradient_accumulation_steps", type=int, default=0,
+                        help="e.g. gradient_accumulation_steps=2 "
+                             "implies that gradients are accumulated over every two mini-batches")
+    parser.add_argument('--num_folds_for_cv', type=int, default=0,
+                        help='Number of folds in k-fold cross validation. '
+                             'If >=2, k-fold cross validation is performed and no other evaluation is applied.')
+    parser.add_argument("--eval_model", type=str, default="train_val_test",
+                        choices=["None", "train", "train_val", "train_val_test", "test"],
+                        help='Evaluating link prediction performance on the given data splits. ')
+    parser.add_argument("--save_model_at_every_epoch", type=int, default=None,
+                        help='The model is saved at every X epochs. If None, the model is saved 4 times in total.')
+    parser.add_argument("--label_smoothing_rate", type=float, default=0.0,
+                        help='0.0 disables label smoothing.')
+    parser.add_argument("--kernel_size", type=int, default=3,
+                        help="Square kernel size for convolution based models.")
+    parser.add_argument("--num_of_output_channels", type=int, default=2,
+                        help="Number of output channels in convolution")
+    parser.add_argument("--num_core", type=int, default=1,
+                        help='Number of cores to be used. 0 implies using a single CPU.')
+    parser.add_argument("--random_seed", type=int, default=0,
+                        help='Seed for all, see pl seed_everything().')
+    parser.add_argument("--sample_triples_ratio", type=float, default=None, help='Sample input data.')
+    parser.add_argument("--read_only_few", type=int, default=None,
+                        help='Read only the first N triples. If 0, read all.')
+    parser.add_argument("--add_noise_rate", type=float, default=0.0,
+                        help='Add add_noise_rate percent of noisy triples into the training dataset.')
+    parser.add_argument('--p', type=int, default=0,
+                        help='P for Clifford Algebra')
+    parser.add_argument('--q', type=int, default=0,
+                        help='Q for Clifford Algebra')
+    parser.add_argument('--pykeen_model_kwargs', type=json.loads, default={})
+    if description is None:
+        return parser.parse_args()
+    return parser.parse_args(description)
+ + + +if __name__ == '__main__': + Execute(get_default_arguments()).start() +
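Since get_default_arguments forwards its optional argument to parser.parse_args, a run can also be configured programmatically; a hedged sketch (the dataset folder is a placeholder):

args = get_default_arguments(['--path_dataset_folder', 'KGs/UMLS',
                              '--model', 'Keci',
                              '--num_epochs', '10',
                              '--embedding_dim', '32'])
Execute(args).start()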
\ No newline at end of file diff --git a/_modules/dicee/sanity_checkers.html b/_modules/dicee/sanity_checkers.html new file mode 100644 index 00000000..2aca39f6 --- /dev/null +++ b/_modules/dicee/sanity_checkers.html @@ -0,0 +1,352 @@
Source code for dicee.sanity_checkers

+import os
+import glob
+import requests
+
+
+
+[docs] +def is_sparql_endpoint_alive(sparql_endpoint: str = None): + if sparql_endpoint: + query = """SELECT (COUNT(*) as ?num_triples) WHERE { ?s ?p ?o .} """ + response = requests.post(sparql_endpoint, data={'query': query}) + assert response.ok + print('SPARQL connection is successful') + return response.ok + else: + return False
+ + + +
+[docs]
+def validate_knowledge_graph(args):
+    """ Validating the source of the knowledge graph """
+    # (1) Validate SPARQL endpoint
+    if is_sparql_endpoint_alive(args.sparql_endpoint):
+        try:
+            assert args.dataset_dir is None and args.path_single_kg is None
+        except AssertionError:
+            raise RuntimeWarning(f'The dataset_dir and path_single_kg arguments '
+                                 f'must be None if sparql_endpoint is given.'
+                                 f'***{args.dataset_dir}***\n'
+                                 f'***{args.path_single_kg}***\n'
+                                 f'These two parameters are set to None.')
+        # Set None.
+        args.dataset_dir = None
+        args.path_single_kg = None
+
+    elif args.path_single_kg is not None:
+        if args.dataset_dir is not None or args.sparql_endpoint is not None:
+            print(f'The dataset_dir and sparql_endpoint arguments '
+                  f'must be None if path_single_kg is given.'
+                  f'***{args.dataset_dir}***\n'
+                  f'***{args.sparql_endpoint}***\n'
+                  f'These two parameters are set to None.')
+            args.dataset_dir = None
+            args.sparql_endpoint = None
+
+    elif args.dataset_dir:
+        try:
+            assert isinstance(args.dataset_dir, str)
+        except AssertionError:
+            raise AssertionError(f'The dataset_dir must be a string if sparql_endpoint is not given.'
+                                 f'***{args.dataset_dir}***')
+        try:
+            assert os.path.isdir(args.dataset_dir) or os.path.isfile(args.dataset_dir)
+        except AssertionError:
+            raise AssertionError(f'The dataset_dir does not lead to a directory '
+                                 f'***{args.dataset_dir}***')
+        # Check whether the input parameter leads to a standard data format (e.g. FOLDER/train.txt)
+        if glob.glob(args.dataset_dir + '/train*'):
+            """ all is good, we have xxx/train.txt """
+        else:
+            raise ValueError(
+                f"--path_dataset_folder **{args.dataset_dir}** must lead to "
+                f"a **folder** containing at least train.txt. "
+                f"Use --path_single_kg **folder/dataset.format** if you have a single file.")
+
+        if args.sparql_endpoint is not None or args.path_single_kg is not None:
+            print(f'The sparql_endpoint and path_single_kg arguments '
+                  f'must be None if dataset_dir is given.'
+                  f'***{args.sparql_endpoint}***\n'
+                  f'***{args.path_single_kg}***\n'
+                  f'These two parameters are set to None.')
+            args.sparql_endpoint = None
+            args.path_single_kg = None
+
+    elif args.dataset_dir is None and args.path_single_kg is None and args.sparql_endpoint is None:
+        raise RuntimeError(f"One of the following arguments must be given:"
+                           f"--path_dataset_folder:{args.dataset_dir},\t"
+                           f"--path_single_kg:{args.path_single_kg},\t"
+                           f"--sparql_endpoint:{args.sparql_endpoint}.")
+    else:
+        raise RuntimeError('Invalid computation flow!')
+ + + +
+[docs] +def sanity_checking_with_arguments(args): + try: + assert args.embedding_dim > 0 + except AssertionError: + raise AssertionError(f'embedding_dim must be strictly positive. Currently:{args.embedding_dim}') + + if args.scoring_technique not in ["AllvsAll", "KvsSample", "KvsAll", "NegSample", "1vsAll", "Pyke"]: + raise KeyError(f'Invalid training strategy => {args.scoring_technique}.') + + assert args.learning_rate > 0 + if args.num_folds_for_cv is None: + args.num_folds_for_cv = 0 + try: + assert args.num_folds_for_cv >= 0 + except AssertionError: + raise AssertionError(f'num_folds_for_cv can not be negative. Currently:{args.num_folds_for_cv}') + validate_knowledge_graph(args)
+ + + +
+[docs]
+def config_kge_sanity_checking(args, dataset):
+    """
+    Sanity checking for input hyperparameters.
+    :return:
+    """
+    assert isinstance(args.batch_size, int) or args.batch_size is None
+    if args.model == 'Shallom' and args.scoring_technique == 'NegSample':
+        print(
+            'Shallom cannot be trained with Negative Sampling. Scoring technique is changed to KvsAll.')
+        args.scoring_technique = 'KvsAll'
+
+    if args.scoring_technique == 'KvsAll':
+        args.neg_ratio = None
+    return args, dataset
+ +
\ No newline at end of file diff --git a/_modules/dicee/static_funcs.html b/_modules/dicee/static_funcs.html new file mode 100644 index 00000000..ea0fc762 --- /dev/null +++ b/_modules/dicee/static_funcs.html @@ -0,0 +1,840 @@
Source code for dicee.static_funcs

+import numpy as np
+import torch
+import datetime
+from typing import Tuple, List
+from .models import CMult, Pyke, DistMult, KeciBase, Keci, TransE, \
+    ComplEx, AConEx, AConvO, AConvQ, ConvQ, ConvO, ConEx, QMult, OMult, Shallom
+from .models.pykeen_models import PykeenKGE
+import time
+import pandas as pd
+import json
+import glob
+import functools
+import os
+import psutil
+from .models.base_model import BaseKGE
+import pickle
+
+
+
+[docs] +def timeit(func): + @functools.wraps(func) + def timeit_wrapper(*args, **kwargs): + start_time = time.perf_counter() + result = func(*args, **kwargs) + end_time = time.perf_counter() + total_time = end_time - start_time + print( + f'Took {total_time:.4f} secs ' + f'| Current Memory Usage {psutil.Process(os.getpid()).memory_info().rss / 1000000: .5} in MB') + return result + + return timeit_wrapper
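The decorator can wrap any callable; a minimal sketch (dummy_sleep is a hypothetical function):

@timeit
def dummy_sleep():
    time.sleep(0.1)

dummy_sleep()  # prints roughly: Took 0.1002 secs | Current Memory Usage ... in MB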
+ + + +
+[docs]
+def save_pickle(*, data: object, file_path: str):
+    with open(file_path, "wb") as f:
+        pickle.dump(data, f)
+ + + +
+[docs]
+def load_pickle(file_path: str):
+    with open(file_path, 'rb') as f:
+        return pickle.load(f)
+ + + +# @TODO: Could these funcs be merged? +
+[docs]
+def select_model(args: dict, is_continual_training: bool = None, storage_path: str = None):
+    assert isinstance(args, dict)
+    assert len(args) > 0
+    assert isinstance(is_continual_training, bool)
+    assert isinstance(storage_path, str)
+    if is_continual_training:
+        print('Loading pre-trained model...')
+        model, _ = intialize_model(args)
+        try:
+            weights = torch.load(storage_path + '/model.pt', torch.device('cpu'))
+            model.load_state_dict(weights)
+            for parameter in model.parameters():
+                parameter.requires_grad = True
+            model.train()
+        except FileNotFoundError:
+            print(f"{storage_path}/model.pt is not found. The model will be trained with random weights")
+        return model, _
+    else:
+        return intialize_model(args)
+ + + +
+[docs] +def load_model(path_of_experiment_folder: str, model_name='model.pt') -> Tuple[object, dict, dict]: + """ Load weights and initialize pytorch module from namespace arguments""" + print(f'Loading model {model_name}...', end=' ') + start_time = time.time() + # (1) Load weights.. + weights = torch.load(path_of_experiment_folder + f'/{model_name}', torch.device('cpu')) + num_ent, ent_dim = weights['entity_embeddings.weight'].shape + num_rel, rel_dim = weights['relation_embeddings.weight'].shape + assert ent_dim==rel_dim + # (2) Loading input configuration. + configs = load_json(path_of_experiment_folder + '/configuration.json') + configs["num_entities"] = num_ent + configs["num_relations"] = num_rel + #configs["embedding_dim"] = ent_dim + + print(f'Done! It took {time.time() - start_time:.3f}') + # (4) Select the model + model, _ = intialize_model(configs) + # (5) Put (1) into (4) + model.load_state_dict(weights) + # (6) Set it into eval model. + for parameter in model.parameters(): + parameter.requires_grad = False + model.eval() + start_time = time.time() + print('Loading entity and relation indexes...', end=' ') + try: + # Maybe ? https://docs.python.org/3/library/mmap.html + with open(path_of_experiment_folder + '/entity_to_idx.p', 'rb') as f: + entity_to_idx = pickle.load(f) + except FileNotFoundError: + print("entity_to_idx.p not found") + entity_to_idx=dict() + try: + with open(path_of_experiment_folder + '/relation_to_idx.p', 'rb') as f: + relation_to_idx = pickle.load(f) + except FileNotFoundError: + print("relation_to_idx.p not found") + relation_to_idx=dict() + print(f'Done! It took {time.time() - start_time:.4f}') + return model, entity_to_idx, relation_to_idx
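A hedged usage sketch, assuming 'Experiments/my_run' is a placeholder path to a completed experiment folder containing model.pt, configuration.json, and the index pickles:

model, entity_to_idx, relation_to_idx = load_model('Experiments/my_run')
print(model.name, len(entity_to_idx), len(relation_to_idx))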
+ + + +
+[docs] +def load_model_ensemble(path_of_experiment_folder: str) -> Tuple[BaseKGE, pd.DataFrame, pd.DataFrame]: + """ Construct Ensemble Of weights and initialize pytorch module from namespace arguments + + (1) Detect models under given path + (2) Accumulate parameters of detected models + (3) Normalize parameters + (4) Insert (3) into model. + """ + print('Constructing Ensemble of ', end=' ') + start_time = time.time() + # (1) Detect models under given path. + paths_for_loading = glob.glob(path_of_experiment_folder + '/model*') + print(f'{len(paths_for_loading)} models...') + assert len(paths_for_loading) > 0 + num_of_models = len(paths_for_loading) + weights = None + # (2) Accumulate parameters of detected models. + while len(paths_for_loading): + p = paths_for_loading.pop() + print(f'Model: {p}...') + if weights is None: + weights = torch.load(p, torch.device('cpu')) + else: + five_weights = torch.load(p, torch.device('cpu')) + # (2.1) Accumulate model parameters + for k, _ in weights.items(): + if 'weight' in k: + weights[k] = (weights[k] + five_weights[k]) + # (3) Normalize parameters. + for k, _ in weights.items(): + if 'weight' in k: + weights[k] /= num_of_models + # (4) Insert (3) into model + # (4.1) Load report and configuration to initialize model. + configs = load_json(path_of_experiment_folder + '/configuration.json') + report = load_json(path_of_experiment_folder + '/report.json') + configs["num_entities"] = report["num_entities"] + configs["num_relations"] = report["num_relations"] + print(f'Done! It took {time.time() - start_time:.2f} seconds.') + # (4.2) Select the model + model, _ = intialize_model(configs) + # (4.3) Put (3) into their places + model.load_state_dict(weights, strict=True) + # (6) Set it into eval model. + print('Setting Eval mode & requires_grad params to False') + for parameter in model.parameters(): + parameter.requires_grad = False + model.eval() + start_time = time.time() + print('Loading entity and relation indexes...', end=' ') + with open(path_of_experiment_folder + '/entity_to_idx.p', 'rb') as f: + entity_to_idx = pickle.load(f) + with open(path_of_experiment_folder + '/relation_to_idx.p', 'rb') as f: + relation_to_idx = pickle.load(f) + assert isinstance(entity_to_idx, dict) + assert isinstance(relation_to_idx, dict) + print(f'Done! It took {time.time() - start_time:.4f}') + return model, entity_to_idx, relation_to_idx
+ + + +
+[docs] +def save_numpy_ndarray(*, data: np.ndarray, file_path: str): + n, d = data.shape + assert n > 0 + assert d == 3 + with open(file_path, 'wb') as f: + np.save(f, data)
+ + + +
+[docs] +def numpy_data_type_changer(train_set: np.ndarray, num: int) -> np.ndarray: + """ + Detect most efficient data type for a given triples + :param train_set: + :param num: + :return: + """ + assert isinstance(num, int) + if np.iinfo(np.int8).max > num: + # print(f'Setting int8,\t {np.iinfo(np.int8).max}') + train_set = train_set.astype(np.int8) + elif np.iinfo(np.int16).max > num: + # print(f'Setting int16,\t {np.iinfo(np.int16).max}') + train_set = train_set.astype(np.int16) + elif np.iinfo(np.int32).max > num: + # print(f'Setting int32,\t {np.iinfo(np.int32).max}') + train_set = train_set.astype(np.int32) + else: + raise TypeError('Int64?') + return train_set
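For instance, with at most 127 entities the triples fit into int8, while an index space of 70,000 requires int32:

triples = np.array([[0, 0, 1], [2, 0, 1]], dtype=np.int64)
print(numpy_data_type_changer(triples, num=3).dtype)      # int8
print(numpy_data_type_changer(triples, num=70000).dtype)  # int32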
+ + + +
+[docs] +def save_checkpoint_model(model, path: str) -> None: + """ Store Pytorch model into disk""" + if isinstance(model, BaseKGE): + try: + torch.save(model.state_dict(), path) + except ReferenceError as e: + print(e) + print(model.name) + print('Could not save the model correctly') + else: + torch.save(model.model.state_dict(), path)
+ + + +
+[docs] +def store(trainer, + trained_model, model_name: str = 'model', full_storage_path: str = None, + save_embeddings_as_csv=False) -> None: + """ + Store trained_model model and save embeddings into csv file. + :param trainer: an instance of trainer class + :param full_storage_path: path to save parameters. + :param model_name: string representation of the name of the model. + :param trained_model: an instance of BaseKGE see core.models.base_model . + :param save_embeddings_as_csv: for easy access of embeddings. + :return: + """ + assert full_storage_path is not None + assert isinstance(model_name, str) + assert len(model_name) > 1 + + # (1) Save pytorch model in trained_model . + save_checkpoint_model(model=trained_model, path=full_storage_path + f'/{model_name}.pt') + if save_embeddings_as_csv: + entity_emb, relation_ebm = trained_model.get_embeddings() + entity_to_idx = pickle.load(open(full_storage_path + '/entity_to_idx.p', 'rb')) + entity_str = entity_to_idx.keys() + # Ensure that the ordering is correct. + assert list(range(0, len(entity_str))) == list(entity_to_idx.values()) + save_embeddings(entity_emb.numpy(), indexes=entity_str, + path=full_storage_path + '/' + trained_model.name + '_entity_embeddings.csv') + del entity_to_idx, entity_str, entity_emb + if relation_ebm is not None: + relation_to_idx = pickle.load(open(full_storage_path + '/relation_to_idx.p', 'rb')) + relations_str = relation_to_idx.keys() + + save_embeddings(relation_ebm.numpy(), indexes=relations_str, + path=full_storage_path + '/' + trained_model.name + '_relation_embeddings.csv') + del relation_ebm, relations_str, relation_to_idx + else: + pass
+ + + +
+[docs] +def add_noisy_triples(train_set: pd.DataFrame, add_noise_rate: float) -> pd.DataFrame: + """ + Add randomly constructed triples + :param train_set: + :param add_noise_rate: + :return: + """ + num_triples = len(train_set) + num_noisy_triples = int(num_triples * add_noise_rate) + print(f'[4 / 14] Generating {num_noisy_triples} noisy triples for training data...') + + list_of_entities = pd.unique(train_set[['subject', 'object']].values.ravel()) + + train_set = pd.concat([train_set, + # Noisy triples + pd.DataFrame( + {'subject': np.random.choice(list_of_entities, num_noisy_triples), + 'relation': np.random.choice( + pd.unique(train_set[['relation']].values.ravel()), + num_noisy_triples), + 'object': np.random.choice(list_of_entities, num_noisy_triples)} + ) + ], ignore_index=True) + + del list_of_entities + + assert num_triples + num_noisy_triples == len(train_set) + return train_set
+ + + +
+[docs] +def read_or_load_kg(args, cls): + print('*** Read or Load Knowledge Graph ***') + start_time = time.time() + kg = cls(dataset_dir=args.dataset_dir, + add_noise_rate=args.add_noise_rate, + sparql_endpoint=args.sparql_endpoint, + path_single_kg=args.path_single_kg, + add_reciprical=args.apply_reciprical_or_noise, + eval_model=args.eval_model, + read_only_few=args.read_only_few, + sample_triples_ratio=args.sample_triples_ratio, + path_for_serialization=args.full_storage_path, + path_for_deserialization=args.path_experiment_folder if hasattr(args, 'path_experiment_folder') else None, + backend=args.backend) + print(f'Preprocessing took: {time.time() - start_time:.3f} seconds') + # (2) Share some info about data for easy access. + print(kg.description_of_input) + return kg
+ + + +
+[docs] +def intialize_model(args: dict) -> Tuple[object, str]: + # @TODO: Apply construct_krone as callback? or use KronE_QMult as a prefix. + # @TODO: Remove form_of_labelling + print(f"Initializing {args['model']}...") + model_name = args['model'] + if "pykeen" in model_name.lower(): + model = PykeenKGE(args=args) + form_of_labelling = "EntityPrediction" + elif model_name == 'Shallom': + model = Shallom(args=args) + form_of_labelling = 'RelationPrediction' + elif model_name == 'ConEx': + model = ConEx(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'AConEx': + model = AConEx(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'QMult': + model = QMult(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'OMult': + model = OMult(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'ConvQ': + model = ConvQ(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'AConvQ': + model = AConvQ(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'ConvO': + model = ConvO(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'AConvO': + model = AConvO(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'ComplEx': + model = ComplEx(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'DistMult': + model = DistMult(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'TransE': + model = TransE(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'Pyke': + model = Pyke(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'Keci': + model = Keci(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'KeciBase': + model = KeciBase(args=args) + form_of_labelling = 'EntityPrediction' + elif model_name == 'CMult': + model = CMult(args=args) + form_of_labelling = 'EntityPrediction' + else: + raise ValueError(f"--model_name: {model_name} is not found.") + return model, form_of_labelling
+ + + +
+[docs] +def load_json(p: str) -> dict: + with open(p, 'r') as r: + args = json.load(r) + return args
+ + + +
+[docs]
+def save_embeddings(embeddings: np.ndarray, indexes, path: str) -> None:
+    """
+    Save embeddings as CSV if memory allows.
+    :param embeddings:
+    :param indexes:
+    :param path:
+    :return:
+    """
+    try:
+        pd.DataFrame(embeddings, index=indexes).to_csv(path)
+    except (KeyError, AttributeError) as e:
+        print('Exception occurred at saving entity embeddings. Computation will continue.')
+        print(e)
+ + + +
+[docs] +def random_prediction(pre_trained_kge): + head_entity: List[str] + relation: List[str] + tail_entity: List[str] + head_entity = pre_trained_kge.sample_entity(1) + relation = pre_trained_kge.sample_relation(1) + tail_entity = pre_trained_kge.sample_entity(1) + triple_score = pre_trained_kge.triple_score(h=head_entity, + r=relation, + t=tail_entity) + return f'( {head_entity[0]},{relation[0]}, {tail_entity[0]} )', pd.DataFrame({'Score': triple_score})
+ + + +
+[docs] +def deploy_triple_prediction(pre_trained_kge, str_subject, str_predicate, str_object): + triple_score = pre_trained_kge.triple_score(h=[str_subject], + r=[str_predicate], + t=[str_object]) + return f'( {str_subject}, {str_predicate}, {str_object} )', pd.DataFrame({'Score': triple_score})
+ + + +
+[docs] +def deploy_tail_entity_prediction(pre_trained_kge, str_subject, str_predicate, top_k): + if pre_trained_kge.model.name == 'Shallom': + print('Tail entity prediction is not available for Shallom') + raise NotImplementedError + scores, entity = pre_trained_kge.predict_topk(h=[str_subject], r=[str_predicate], topk=top_k) + return f'( {str_subject}, {str_predicate}, ? )', pd.DataFrame({'Entity': entity, 'Score': scores})
+ + + +
+[docs] +def deploy_head_entity_prediction(pre_trained_kge, str_object, str_predicate, top_k): + if pre_trained_kge.model.name == 'Shallom': + print('Head entity prediction is not available for Shallom') + raise NotImplementedError + + scores, entity = pre_trained_kge.predict_topk(t=[str_object], r=[str_predicate], topk=top_k) + return f'( ?, {str_predicate}, {str_object} )', pd.DataFrame({'Entity': entity, 'Score': scores})
+ + + +
+[docs] +def deploy_relation_prediction(pre_trained_kge, str_subject, str_object, top_k): + scores, relations = pre_trained_kge.predict_topk(h=[str_subject], t=[str_object], topk=top_k) + return f'( {str_subject}, ?, {str_object} )', pd.DataFrame({'Relations': relations, 'Score': scores})
+ + + +
+[docs] +@timeit +def vocab_to_parquet(vocab_to_idx, name, path_for_serialization, print_into): + # @TODO: This function should take any DASK/Pandas DataFrame or Series. + print(print_into) + vocab_to_idx.to_parquet(path_for_serialization + f'/{name}', compression='gzip', engine='pyarrow') + print('Done !\n')
+ + + +
+[docs] +def create_experiment_folder(folder_name='Experiments'): + directory = os.getcwd() + "/" + folder_name + "/" + # folder_name = str(datetime.datetime.now()) + folder_name = str(datetime.datetime.now()).replace(":", "-") + # path_of_folder = directory + folder_name + path_of_folder = os.path.join(directory, folder_name) + os.makedirs(path_of_folder) + return path_of_folder
+ + + +
+[docs] +def continual_training_setup_executor(executor) -> None: + """ + storage_path:str A path leading to a parent directory, where a subdirectory containing KGE related data + + full_storage_path:str A path leading to a subdirectory containing KGE related data + + """ + if executor.is_continual_training: + # (4.1) If it is continual, then store new models on previous path. + executor.storage_path = executor.args.full_storage_path + else: + # Create a single directory containing KGE and all related data + if executor.args.path_to_store_single_run: + os.makedirs(executor.args.path_to_store_single_run, exist_ok=False) + executor.args.full_storage_path = executor.args.path_to_store_single_run + else: + # Create a parent and subdirectory. + executor.args.full_storage_path = create_experiment_folder(folder_name=executor.args.storage_path) + executor.storage_path = executor.args.full_storage_path + with open(executor.args.full_storage_path + '/configuration.json', 'w') as file_descriptor: + temp = vars(executor.args) + json.dump(temp, file_descriptor, indent=3)
+ + + +
+[docs] +def exponential_function(x: np.ndarray, lam: float, ascending_order=True) -> torch.FloatTensor: + # A sequence in exponentially decreasing order + result = np.exp(-lam * x) / np.sum(np.exp(-lam * x)) + assert 0.999 < sum(result) < 1.0001 + result = np.flip(result) if ascending_order else result + return torch.tensor(result.tolist())
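A worked example: with lam=0.1 over x = [0, 1, 2] the weights form a softmax-style distribution that sums to one; ascending_order=True (the default) flips them into increasing order.

w = exponential_function(np.arange(3), lam=0.1, ascending_order=False)
print(w)               # approximately tensor([0.3672, 0.3322, 0.3006])
print(float(w.sum()))  # ~1.0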
+ + + +
+[docs] +@timeit +def load_numpy(path) -> np.ndarray: + print('Loading indexed training data...', end='') + with open(path, 'rb') as f: + data = np.load(f) + return data
+ + +
+[docs] +def evaluate(entity_to_idx, scores, easy_answers, hard_answers): + """ + Evaluate multi hop query answering on different query types + """ + # Calculate MRR considering the hard and easy answers + total_mrr = 0 + total_h1 = 0 + total_h3 = 0 + total_h10 = 0 + num_queries = len(scores) + # @TODO: Dictionary keys do not need to be in order, zip(entity_to_idx.keys(), entity_score) is not a viable solution + # @TODO: Although it is working + # @TODO: Use pytorch to obtain the entities sorted in the descending order of scores + for query, entity_score in scores.items(): + entity_scores = [(ei, s) for ei, s in zip(entity_to_idx.keys(), entity_score)] + entity_scores = sorted(entity_scores, key=lambda x: x[1], reverse=True) + + # Extract corresponding easy and hard answers + easy_ans = easy_answers[query] + hard_ans = hard_answers[query] + easy_answer_indices = [idx for idx, (entity, _) in enumerate(entity_scores) if entity in easy_ans] + hard_answer_indices = [idx for idx, (entity, _) in enumerate(entity_scores) if entity in hard_ans] + + answer_indices = easy_answer_indices + hard_answer_indices + + cur_ranking = np.array(answer_indices) + + # Sort by position in the ranking; indices for (easy + hard) answers + cur_ranking, indices = np.sort(cur_ranking), np.argsort(cur_ranking) + num_easy = len(easy_ans) + num_hard = len(hard_ans) + + # Indices with hard answers only + masks = indices >= num_easy + + # Reduce ranking for each answer entity by the amount of (easy+hard) answers appearing before it + answer_list = np.arange(num_hard + num_easy, dtype=float) + cur_ranking = cur_ranking - answer_list + 1 + + # Only take indices that belong to the hard answers + cur_ranking = cur_ranking[masks] + # print(cur_ranking) + mrr = np.mean(1.0 / cur_ranking) + h1 = np.mean((cur_ranking <= 1).astype(float)) + h3 = np.mean((cur_ranking <= 3).astype(float)) + h10 = np.mean((cur_ranking <= 10).astype(float)) + total_mrr += mrr + total_h1 += h1 + total_h3 += h3 + total_h10 += h10 + # average for all queries of a type + avg_mrr = total_mrr / num_queries + avg_h1 = total_h1 / num_queries + avg_h3 = total_h3 / num_queries + avg_h10 = total_h10 / num_queries + + return avg_mrr, avg_h1, avg_h3, avg_h10
+ +
\ No newline at end of file diff --git a/_modules/dicee/static_funcs_training.html b/_modules/dicee/static_funcs_training.html new file mode 100644 index 00000000..921687c1 --- /dev/null +++ b/_modules/dicee/static_funcs_training.html @@ -0,0 +1,351 @@
Source code for dicee.static_funcs_training

+import torch
+from typing import Dict,Tuple,List
+import numpy as np
+
+
+
+[docs]
+def evaluate_lp(model, triple_idx, num_entities, er_vocab: Dict[Tuple, List], re_vocab: Dict[Tuple, List],
+                info='Eval Starts'):
+    """
+    Evaluate a model in a standard link prediction task.
+
+    For each triple, the rank is computed by taking the mean of the filtered missing-head-entity rank and
+    the filtered missing-tail-entity rank.
+    :param model:
+    :param triple_idx:
+    :param info:
+    :return:
+    """
+    model.eval()
+    print(info)
+    print(f'Num of triples {len(triple_idx)}')
+    print('** Evaluation without batching')
+    hits = dict()
+    reciprocal_ranks = []
+    # Iterate over test triples
+    all_entities = torch.arange(0, num_entities).long()
+    all_entities = all_entities.reshape(len(all_entities), )
+    # Iterating one by one is not good when you are using batch norm
+    for i in range(0, len(triple_idx)):
+        # (1) Get a triple (head entity, relation, tail entity)
+        data_point = triple_idx[i]
+        h, r, t = data_point[0], data_point[1], data_point[2]
+
+        # (2) Predict missing heads and tails
+        x = torch.stack((torch.tensor(h).repeat(num_entities, ),
+                         torch.tensor(r).repeat(num_entities, ),
+                         all_entities), dim=1)
+
+        predictions_tails = model.forward_triples(x)
+        x = torch.stack((all_entities,
+                         torch.tensor(r).repeat(num_entities, ),
+                         torch.tensor(t).repeat(num_entities)
+                         ), dim=1)
+
+        predictions_heads = model.forward_triples(x)
+        del x
+
+        # (3) Compute filtered ranks for missing tail entities.
+        # (3.1) Retrieve tail entities to be filtered.
+        filt_tails = er_vocab[(h, r)]
+        # (3.2) Get the predicted target's score.
+        target_value = predictions_tails[t].item()
+        # (3.3) Filter scores of all triples containing filtered tail entities.
+        predictions_tails[filt_tails] = -np.inf
+        # (3.4) Reset the target's score.
+        predictions_tails[t] = target_value
+        # (3.5) Sort the scores.
+        _, sort_idxs = torch.sort(predictions_tails, descending=True)
+        sort_idxs = sort_idxs.detach()
+        filt_tail_entity_rank = np.where(sort_idxs == t)[0][0]
+
+        # (4) Compute filtered ranks for missing head entities.
+        # (4.1) Retrieve head entities to be filtered.
+        filt_heads = re_vocab[(r, t)]
+        # (4.2) Get the predicted target's score.
+        target_value = predictions_heads[h].item()
+        # (4.3) Filter scores of all triples containing filtered head entities.
+        predictions_heads[filt_heads] = -np.inf
+        predictions_heads[h] = target_value
+        _, sort_idxs = torch.sort(predictions_heads, descending=True)
+        sort_idxs = sort_idxs.detach()
+        filt_head_entity_rank = np.where(sort_idxs == h)[0][0]
+
+        # (5) Add 1 to the ranks, as numpy arrays are zero-indexed.
+        filt_head_entity_rank += 1
+        filt_tail_entity_rank += 1
+
+        rr = 1.0 / filt_head_entity_rank + (1.0 / filt_tail_entity_rank)
+        # (6) Store reciprocal ranks.
+        reciprocal_ranks.append(rr)
+        # print(f'{i}.th triple: mean reciprocal rank:{rr}')
+
+        # (7) Compute Hits@N
+        for hits_level in range(1, 11):
+            res = 1 if filt_head_entity_rank <= hits_level else 0
+            res += 1 if filt_tail_entity_rank <= hits_level else 0
+            if res > 0:
+                hits.setdefault(hits_level, []).append(res)
+
+    mean_reciprocal_rank = sum(reciprocal_ranks) / (float(len(triple_idx) * 2))
+
+    if 1 in hits:
+        hit_1 = sum(hits[1]) / (float(len(triple_idx) * 2))
+    else:
+        hit_1 = 0
+
+    if 3 in hits:
+        hit_3 = sum(hits[3]) / (float(len(triple_idx) * 2))
+    else:
+        hit_3 = 0
+
+    if 10 in hits:
+        hit_10 = sum(hits[10]) / (float(len(triple_idx) * 2))
+    else:
+        hit_10 = 0
+
+    results = {'H@1': hit_1, 'H@3': hit_3, 'H@10': hit_10,
+               'MRR': mean_reciprocal_rank}
+    print(results)
+    return results
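The filtering step in a nutshell: before ranking, the scores of all other known true answers are pushed to negative infinity so they cannot crowd out the test triple. A toy sketch of the tail-side computation, mirroring steps (3.1) to (3.5) above:

predictions = torch.tensor([0.9, 0.1, 0.8, 0.7])  # scores for 4 candidate tails
t, filt_tails = 2, [0, 3]                         # gold tail is 2; tails 0 and 3 are also true
target = predictions[t].item()
predictions[filt_tails] = -np.inf                 # filter the other known answers
predictions[t] = target                           # restore the gold score
_, sort_idxs = torch.sort(predictions, descending=True)
print(int(np.where(sort_idxs.numpy() == t)[0][0]) + 1)  # filtered rank: 1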
+ + + +
+[docs] +def efficient_zero_grad(model): + # Use this instead of + # self.optimizer.zero_grad() + # + # https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-parameter-grad-none-instead-of-model-zero-grad-or-optimizer-zero-grad + for param in model.parameters(): + param.grad = None
+ + +
\ No newline at end of file diff --git a/_modules/dicee/static_preprocess_funcs.html b/_modules/dicee/static_preprocess_funcs.html new file mode 100644 index 00000000..42b979cc --- /dev/null +++ b/_modules/dicee/static_preprocess_funcs.html @@ -0,0 +1,390 @@
Source code for dicee.static_preprocess_funcs

+import functools
+import numpy as np
+from typing import Tuple
+import time
+from collections import defaultdict
+from .sanity_checkers import sanity_checking_with_arguments
+
+enable_log = False
+
+[docs] +def timeit(func): + @functools.wraps(func) + def timeit_wrapper(*args, **kwargs): + start_time = time.perf_counter() + result = func(*args, **kwargs) + end_time = time.perf_counter() + total_time = end_time - start_time + if enable_log: + if args is not None: + s_args = [type(i) for i in args] + else: + s_args = args + if kwargs is not None: + s_kwargs = {k: type(v) for k, v in kwargs.items()} + else: + s_kwargs = kwargs + print(f'Function {func.__name__} with Args:{s_args} | Kwargs:{s_kwargs} took {total_time:.4f} seconds') + else: + print(f'Took {total_time:.4f} seconds') + + return result + + return timeit_wrapper
+ + + +
+[docs]
+def preprocesses_input_args(args):
+    """ Sanity checking of the input arguments """
+    # To update the default values of the pytorch-lightning Trainer
+    args.max_epochs = args.num_epochs
+    args.min_epochs = args.num_epochs
+    assert args.weight_decay >= 0.0
+    args.learning_rate = args.lr
+    args.deterministic = True
+
+    assert args.init_param in ['xavier_normal', None]
+
+    # No need to eval. Investigate runtime performance
+    args.check_val_every_n_epoch = 10 ** 6  # i.e., no eval
+    assert args.add_noise_rate is None or isinstance(args.add_noise_rate, float)
+    args.logger = False
+    try:
+        assert args.eval_model in [None, 'None', 'train', 'val', 'test', 'train_val', 'train_test', 'val_test',
+                                   'train_val_test']
+    except AssertionError:
+        raise AssertionError(f'Unexpected input for eval_model ***\t{args.eval_model}\t***')
+
+    if args.eval_model == 'None':
+        args.eval_model = None
+
+    # Reciprocal-triple checking
+    if args.scoring_technique in ["AllvsAll", "KvsSample", "KvsAll", "1vsAll"]:
+        args.apply_reciprical_or_noise = True
+    elif args.scoring_technique == 'NegSample':
+        args.apply_reciprical_or_noise = False
+    else:
+        raise KeyError(f'Unexpected input for scoring_technique.\t{args.scoring_technique}')
+
+    if args.sample_triples_ratio is not None:
+        assert 1.0 >= args.sample_triples_ratio >= 0.0
+    assert args.backend in ["pandas", "polars", "rdflib"]
+    sanity_checking_with_arguments(args)
+    if args.model == 'Shallom':
+        args.scoring_technique = 'KvsAll'
+    if args.normalization == 'None':
+        args.normalization = None
+    assert args.normalization in [None, 'LayerNorm', 'BatchNorm1d']
+    return args
+ + + +
+[docs]
+@timeit
+def create_constraints(triples: np.ndarray) -> Tuple[dict, dict, dict, dict]:
+    """
+    (1) Extract domains and ranges of relations.
+    (2) Store a mapping from relations to entities that are outside of the domain and range,
+    i.e., create constrained entities based on the domain and range of relations.
+    :param triples:
+    :return:
+    """
+    assert isinstance(triples, np.ndarray)
+    assert triples.shape[1] == 3
+
+    # (1) Compute the range and domain of each relation
+    domain_per_rel = dict()
+    range_per_rel = dict()
+
+    range_constraints_per_rel = dict()
+    domain_constraints_per_rel = dict()
+    set_of_entities = set()
+    set_of_relations = set()
+    print(f'Constructing domain and range information by iterating over {len(triples)} triples...', end='\t')
+    for (e1, p, e2) in triples:
+        # e1, p, e2 have numpy.int16 or other integer types.
+        domain_per_rel.setdefault(p, set()).add(e1)
+        range_per_rel.setdefault(p, set()).add(e2)
+        set_of_entities.add(e1)
+        set_of_relations.add(p)
+        set_of_entities.add(e2)
+    print(f'Creating constraints based on {len(set_of_relations)} relations and {len(set_of_entities)} entities...',
+          end='\t')
+    for rel in set_of_relations:
+        range_constraints_per_rel[rel] = list(set_of_entities - range_per_rel[rel])
+        domain_constraints_per_rel[rel] = list(set_of_entities - domain_per_rel[rel])
+    return domain_constraints_per_rel, range_constraints_per_rel, domain_per_rel, range_per_rel
+ + + +
+[docs] +def get_er_vocab(data): + # head entity and relation + er_vocab = defaultdict(list) + for triple in data: + er_vocab[(triple[0], triple[1])].append(triple[2]) + return er_vocab
+ + + +
+[docs]
+def get_re_vocab(data):
+    # relation and tail entity
+    re_vocab = defaultdict(list)
+    for triple in data:
+        re_vocab[(triple[1], triple[2])].append(triple[0])
+    return re_vocab
+ + + +
+[docs]
+def get_ee_vocab(data):
+    # head and tail entities
+    ee_vocab = defaultdict(list)
+    for triple in data:
+        ee_vocab[(triple[0], triple[2])].append(triple[1])
+    return ee_vocab
+ + + +
+[docs] +@timeit +def mapping_from_first_two_cols_to_third(train_set_idx): + store = dict() + for s_idx, p_idx, o_idx in train_set_idx: + store.setdefault((s_idx, p_idx), list()).append(o_idx) + return store
+ +
\ No newline at end of file diff --git a/_modules/dicee/trainer/dice_trainer.html b/_modules/dicee/trainer/dice_trainer.html new file mode 100644 index 00000000..3285806b --- /dev/null +++ b/_modules/dicee/trainer/dice_trainer.html @@ -0,0 +1,501 @@
Source code for dicee.trainer.dice_trainer

+import pytorch_lightning as pl
+import gc
+from typing import Union
+from dicee.models.base_model import BaseKGE
+from dicee.static_funcs import select_model
+from dicee.callbacks import (PPE, FPPE, Eval, KronE, PrintCallback, KGESaveCallback, AccumulateEpochLossCallback,
+                             Perturb)
+from dicee.dataset_classes import construct_dataset, reload_dataset
+from .torch_trainer import TorchTrainer
+from .torch_trainer_ddp import TorchDDPTrainer
+from ..static_funcs import timeit
+import os
+import torch
+from pytorch_lightning.strategies import DDPStrategy
+import pandas as pd
+from sklearn.model_selection import KFold
+import copy
+from typing import List, Tuple
+from ..knowledge_graph import KG
+
+
+
+[docs] +def initialize_trainer(args, callbacks): + if args.trainer == 'torchCPUTrainer': + print('Initializing TorchTrainer CPU Trainer...', end='\t') + return TorchTrainer(args, callbacks=callbacks) + elif args.trainer == 'torchDDP': + if torch.cuda.is_available(): + print('Initializing TorchDDPTrainer GPU', end='\t') + return TorchDDPTrainer(args, callbacks=callbacks) + else: + print('Initializing TorchTrainer CPU Trainer', end='\t') + return TorchTrainer(args, callbacks=callbacks) + elif args.trainer == 'PL': + print('Initializing Pytorch-lightning Trainer', end='\t') + return pl.Trainer.from_argparse_args(args, + callbacks=callbacks, + strategy=DDPStrategy(find_unused_parameters=False)) + else: + print('Initialize TorchTrainer CPU Trainer', end='\t') + return TorchTrainer(args, callbacks=callbacks)
+ + + +
+[docs] +def get_callbacks(args): + callbacks = [PrintCallback(), + KGESaveCallback(every_x_epoch=args.save_model_at_every_epoch, + max_epochs=args.max_epochs, + path=args.full_storage_path), + AccumulateEpochLossCallback(path=args.full_storage_path) + ] + if isinstance(args.callbacks, list): + return callbacks + for k, v in args.callbacks.items(): + if k=="Perturb": + callbacks.append(Perturb(**v)) + elif k == 'FPP': + callbacks.append( + FPPE(num_epochs=args.num_epochs, path=args.full_storage_path, + last_percent_to_consider=v.get('last_percent_to_consider'))) + elif k == 'PPE': + callbacks.append( + PPE(num_epochs=args.num_epochs, path=args.full_storage_path, + last_percent_to_consider=v.get('last_percent_to_consider'))) + elif k == 'KronE': + callbacks.append(KronE()) + elif k == 'Eval': + callbacks.append(Eval(path=args.full_storage_path, epoch_ratio=v.get('epoch_ratio'))) + else: + raise RuntimeError(f'Incorrect callback:{k}') + return callbacks
+ + +
+[docs] +class DICE_Trainer: + """ + DICE_Trainer implement + 1- Pytorch Lightning trainer (https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html) + 2- Multi-GPU Trainer(https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) + 3- CPU Trainer + + Parameter + --------- + args + + is_continual_training:bool + + storage_path:str + + evaluator: + + Returns + ------- + report:dict + """ + + def __init__(self, args, is_continual_training, storage_path, evaluator=None): + self.report = dict() + self.args = args + self.trainer = None + self.is_continual_training = is_continual_training + self.storage_path = storage_path + # Required for CV. + self.evaluator = evaluator + self.form_of_labelling = None + print( + f'# of CPUs:{os.cpu_count()} | # of GPUs:{torch.cuda.device_count()} | # of CPUs for dataloader:{self.args.num_core}') + + for i in range(torch.cuda.device_count()): + print(torch.cuda.get_device_name(i)) + +
+[docs]
+    def continual_start(self):
+        """
+        (1) Initialize training.
+        (2) Load the model.
+        (3) Load the trainer.
+        (4) Fit the model.
+
+        Parameter
+        ---------
+
+        Returns
+        -------
+        model:
+        form_of_labelling: str
+        """
+
+        self.trainer = self.initialize_trainer(callbacks=get_callbacks(self.args))
+        model, form_of_labelling = self.initialize_or_load_model()
+        assert form_of_labelling in ['EntityPrediction', 'RelationPrediction', 'Pyke']
+        assert self.args.scoring_technique in ['KvsSample', '1vsAll', 'KvsAll', 'NegSample']
+        train_loader = self.initialize_dataloader(
+            reload_dataset(path=self.storage_path, form_of_labelling=form_of_labelling,
+                           scoring_technique=self.args.scoring_technique,
+                           neg_ratio=self.args.neg_ratio,
+                           label_smoothing_rate=self.args.label_smoothing_rate))
+        self.trainer.fit(model, train_dataloaders=train_loader)
+        return model, form_of_labelling
+ + +
+[docs] + @timeit + def initialize_trainer(self, callbacks: List) -> pl.Trainer: + """ Initialize Trainer from input arguments """ + return initialize_trainer(self.args, callbacks)
+ + +
+[docs] + @timeit + def initialize_or_load_model(self): + print('Initializing Model...', end='\t') + model, form_of_labelling = select_model(vars(self.args), self.is_continual_training, self.storage_path) + self.report['form_of_labelling'] = form_of_labelling + assert form_of_labelling in ['EntityPrediction', 'RelationPrediction'] + return model, form_of_labelling
+ + +
+[docs] + @timeit + def initialize_dataloader(self, dataset: torch.utils.data.Dataset) -> torch.utils.data.DataLoader: + print('Initializing Dataloader...', end='\t') + # https://pytorch.org/docs/stable/data.html#multi-process-data-loading + # https://github.com/pytorch/pytorch/issues/13246#issuecomment-905703662 + return torch.utils.data.DataLoader(dataset=dataset, batch_size=self.args.batch_size, + shuffle=True, collate_fn=dataset.collate_fn, + num_workers=self.args.num_core, persistent_workers=False)
+ + +
+[docs] + @timeit + def initialize_dataset(self, dataset, form_of_labelling) -> torch.utils.data.Dataset: + print('Initializing Dataset...', end='\t') + train_dataset = construct_dataset(train_set=dataset.train_set, + valid_set=dataset.valid_set, + test_set=dataset.test_set, + entity_to_idx=dataset.entity_to_idx, + relation_to_idx=dataset.relation_to_idx, + form_of_labelling=form_of_labelling, + scoring_technique=self.args.scoring_technique, + neg_ratio=self.args.neg_ratio, + label_smoothing_rate=self.args.label_smoothing_rate) + if self.args.eval_model is None: + del dataset.train_set + gc.collect() + return train_dataset
+ + +
+[docs]
+    def start(self, dataset: KG) -> Tuple[BaseKGE, str]:
+        """ Train the selected model via the selected training strategy """
+        print('------------------- Train -------------------')
+        # (1) Perform K-fold CV
+        if self.args.num_folds_for_cv >= 2:
+            return self.k_fold_cross_validation(dataset)
+        else:
+            self.trainer: Union[TorchTrainer, TorchDDPTrainer, pl.Trainer]
+            self.trainer = self.initialize_trainer(callbacks=get_callbacks(self.args))
+            model, form_of_labelling = self.initialize_or_load_model()
+            self.trainer.evaluator = self.evaluator
+            # @TODO Why do we need to send the dataset?
+            self.trainer.dataset = dataset
+            self.trainer.form_of_labelling = form_of_labelling
+            self.trainer.fit(model, train_dataloaders=self.initialize_dataloader(
+                self.initialize_dataset(dataset, form_of_labelling)))
+            return model, form_of_labelling
+
+    def k_fold_cross_validation(self, dataset) -> Tuple[BaseKGE, str]:
+        """
+        Perform k-fold cross-validation
+
+        1. Obtain k train and test splits.
+        2. For each split,
+           2.1 initialize the trainer and the model,
+           2.2 train the model with the configuration provided in args,
+           2.3 compute the mean reciprocal rank (MRR) of the model on the respective test split.
+        3. Report the mean and the standard deviation of the MRR scores over the k folds.
+
+        :param dataset:
+        :return: model, form_of_labelling
+        """
+        print(f'{self.args.num_folds_for_cv}-fold cross-validation')
+        # (1) Create the k-fold splitter.
+        kf = KFold(n_splits=self.args.num_folds_for_cv, shuffle=True, random_state=1)
+        model = None
+        eval_folds = []
+        form_of_labelling = None
+        # (2) Iterate over the splits of (1).
+        for (ith, (train_index, test_index)) in enumerate(kf.split(dataset.train_set)):
+            # (2.1) Create a new copy of the arguments for the callbacks.
+            args = copy.copy(self.args)
+            trainer = initialize_trainer(args, get_callbacks(args))
+            model, form_of_labelling = select_model(vars(args), self.is_continual_training, self.storage_path)
+            print(f'{form_of_labelling} training starts: {model.name}')
+            train_set_for_i_th_fold, test_set_for_i_th_fold = dataset.train_set[train_index], dataset.train_set[
+                test_index]
+            trainer.fit(model, train_dataloaders=self.initialize_dataloader(
+                construct_dataset(train_set=train_set_for_i_th_fold,
+                                  entity_to_idx=dataset.entity_to_idx,
+                                  relation_to_idx=dataset.relation_to_idx,
+                                  form_of_labelling=form_of_labelling,
+                                  scoring_technique=self.args.scoring_technique,
+                                  neg_ratio=self.args.neg_ratio,
+                                  label_smoothing_rate=self.args.label_smoothing_rate)))
+            res = self.evaluator.eval_with_data(dataset=dataset, trained_model=model,
+                                                triple_idx=test_set_for_i_th_fold,
+                                                form_of_labelling=form_of_labelling)
+            # res = self.evaluator.evaluate_lp_k_vs_all(model, test_set_for_i_th_fold, form_of_labelling=form_of_labelling)
+            eval_folds.append([res['MRR'], res['H@1'], res['H@3'], res['H@10']])
+        eval_folds = pd.DataFrame(eval_folds, columns=['MRR', 'H@1', 'H@3', 'H@10'])
+        self.evaluator.report = eval_folds.to_dict()
+        print(eval_folds)
+        print(eval_folds.describe())
+        return model, form_of_labelling
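The index-splitting in steps (1) and (2) can be reproduced in isolation; a self-contained sketch with toy triples standing in for dataset.train_set:

    import numpy as np
    from sklearn.model_selection import KFold

    train_set = np.arange(30).reshape(10, 3)   # toy stand-in for dataset.train_set
    kf = KFold(n_splits=5, shuffle=True, random_state=1)
    for ith, (train_index, test_index) in enumerate(kf.split(train_set)):
        fold_train = train_set[train_index]    # triples used to fit this fold's model
        fold_test = train_set[test_index]      # held-out triples used to compute MRR
        print(ith, len(fold_train), len(fold_test))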
\ No newline at end of file
diff --git a/_modules/dicee/trainer/torch_trainer.html b/_modules/dicee/trainer/torch_trainer.html
new file mode 100644
index 00000000..9e15f060
--- /dev/null
+++ b/_modules/dicee/trainer/torch_trainer.html
@@ -0,0 +1,439 @@
+dicee.trainer.torch_trainer - DICE Embeddings 0.0.6 documentation
Source code for dicee.trainer.torch_trainer

+import torch
+from typing import Tuple
+from dicee.abstracts import AbstractTrainer
+import time
+import os
+import psutil
+
+
+
+class TorchTrainer(AbstractTrainer):
+    """
+    TorchTrainer for training with a single GPU or multiple CPUs on a single node
+
+    Arguments
+    ----------
+    args: ?
+
+    callbacks: list of Abstract callback instances
+    """
+
+    def __init__(self, args, callbacks):
+        super().__init__(args, callbacks)
+        self.loss_function = None
+        self.optimizer = None
+        self.model = None
+        self.train_dataloaders = None
+        self.training_step = None
+        torch.manual_seed(self.attributes.random_seed)
+        torch.cuda.manual_seed_all(self.attributes.random_seed)
+        if self.attributes.gpus and torch.cuda.is_available():
+            self.device = torch.device(f'cuda:{self.attributes.gpus}')
+        else:
+            self.device = 'cpu'
+        # https://psutil.readthedocs.io/en/latest/#psutil.Process
+        self.process = psutil.Process(os.getpid())
+
+    def _run_batch(self, i: int, x_batch, y_batch) -> float:
+        """
+        Forward and backward pass for a mini-batch
+
+        Arguments
+        ----------
+        i : index of the batch
+        x_batch: torch.Tensor on the selected device
+        y_batch: torch.Tensor on the selected device
+
+        Returns
+        -------
+        batch loss (float)
+        """
+        if self.attributes.gradient_accumulation_steps > 1:
+            # (1) Accumulate gradients: zero them only every gradient_accumulation_steps mini-batches.
+            if i % self.attributes.gradient_accumulation_steps == 0:
+                self.optimizer.zero_grad(set_to_none=True)
+        else:
+            # (2) No gradient accumulation: zero the gradients every mini-batch.
+            self.optimizer.zero_grad(set_to_none=True)
+        # (3) Forward, loss, backward, and parameter update for the batch.
+        return self.forward_backward_update(x_batch, y_batch)
+
+    def _run_epoch(self, epoch: int) -> float:
+        """
+        Iterate once over the training dataset
+
+        Arguments
+        ----------
+        epoch: int
+
+        Returns
+        -------
+        average loss over the dataset
+        """
+        epoch_loss = 0
+        i = 0
+        construct_mini_batch_time = None
+        batch: list
+        for i, batch in enumerate(self.train_dataloaders):
+            # (1) Extract inputs and outputs and move them onto the selected device.
+            x_batch, y_batch = self.extract_input_outputs_set_device(batch)
+            start_time = time.time()
+            if construct_mini_batch_time:
+                construct_mini_batch_time = start_time - construct_mini_batch_time
+            # (2) Forward-Backward-Update.
+            batch_loss = self._run_batch(i, x_batch, y_batch)
+            epoch_loss += batch_loss
+            if construct_mini_batch_time:
+                print(
+                    f"Epoch:{epoch + 1} "
+                    f"| Batch:{i + 1} "
+                    f"| Loss:{batch_loss:.10f} "
+                    f"| ForwardBackwardUpdate:{(time.time() - start_time):.2f}sec "
+                    f"| BatchConst.:{construct_mini_batch_time:.2f}sec "
+                    f"| Mem. Usage {self.process.memory_info().rss / 1_000_000: .5}MB "
+                    f" ({psutil.virtual_memory().percent} %)")
+            else:
+                print(
+                    f"Epoch:{epoch + 1} "
+                    f"| Batch:{i + 1} "
+                    f"| Loss:{batch_loss} "
+                    f"| ForwardBackwardUpdate:{(time.time() - start_time):.2f}secs "
+                    f"| Mem. Usage {self.process.memory_info().rss / 1_000_000: .5}MB ")
+            construct_mini_batch_time = time.time()
+        return epoch_loss / (i + 1)
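For comparison, the snippet below is the standard gradient-accumulation pattern in plain PyTorch, as a self-contained sketch with a toy model. Note that _run_batch above zeroes gradients on a schedule, while the optimizer step in forward_backward_update (below) still runs on every batch; the canonical pattern updates only once per accumulation window:

    import torch

    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    accumulation_steps = 4
    batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]
    for i, (x, y) in enumerate(batches):
        loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
        loss.backward()                          # gradients accumulate across iterations
        if (i + 1) % accumulation_steps == 0:
            optimizer.step()                     # one update per accumulation_steps batches
            optimizer.zero_grad(set_to_none=True)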
+    def fit(self, *args, train_dataloaders, **kwargs) -> None:
+        """
+        Start training
+
+        Arguments
+        ----------
+        args: tuple
+            (BaseKGE,)
+        kwargs: dict
+            empty
+
+        Returns
+        -------
+        None
+        """
+        assert len(args) == 1
+        model, = args
+        self.model = model
+        self.model.to(self.device)
+        self.train_dataloaders = train_dataloaders
+        self.loss_function = model.loss_function
+        self.optimizer = self.model.configure_optimizers()
+        self.training_step = self.model.training_step
+        # (1) Run the callbacks before training starts.
+        self.on_fit_start(self, self.model)
+        print(f'NumOfDataPoints:{len(self.train_dataloaders.dataset)} '
+              f'| NumOfEpochs:{self.attributes.max_epochs} '
+              f'| LearningRate:{self.model.learning_rate} '
+              f'| BatchSize:{self.train_dataloaders.batch_size} '
+              f'| EpochBatchsize:{len(train_dataloaders)}')
+        for epoch in range(self.attributes.max_epochs):
+            start_time = time.time()
+            avg_epoch_loss = self._run_epoch(epoch)
+            print(f"Epoch:{epoch + 1} "
+                  f"| Loss:{avg_epoch_loss:.8f} "
+                  f"| Runtime:{(time.time() - start_time) / 60:.3f} mins")
+            """
+            # Autobatch Finder: Double the current batch size if memory allows, and repeat this process at most 5 times.
+            if self.attributes.auto_batch_finder and psutil.virtual_memory().percent < 30.0 and counter < 5:
+                self.train_dataloaders = DataLoader(dataset=self.train_dataloaders.dataset,
+                                                    batch_size=self.train_dataloaders.batch_size
+                                                               + self.train_dataloaders.batch_size,
+                                                    shuffle=True, collate_fn=self.train_dataloaders.dataset.collate_fn,
+                                                    num_workers=self.train_dataloaders.num_workers,
+                                                    persistent_workers=False)
+                print(
+                    f'NumOfDataPoints:{len(self.train_dataloaders.dataset)} '
+                    f'| NumOfEpochs:{self.attributes.max_epochs} '
+                    f'| LearningRate:{self.model.learning_rate} '
+                    f'| BatchSize:{self.train_dataloaders.batch_size} '
+                    f'| EpochBatchsize:{len(train_dataloaders)}')
+                counter += 1
+            """
+            self.model.loss_history.append(avg_epoch_loss)
+            self.on_train_epoch_end(self, self.model)
+        self.on_fit_end(self, self.model)
+
+    def forward_backward_update(self, x_batch: torch.Tensor, y_batch: torch.Tensor) -> float:
+        """
+        Compute the forward pass, the loss, the backward pass, and the parameter update
+
+        Arguments
+        ----------
+        x_batch: (torch.Tensor) mini-batch inputs
+        y_batch: (torch.Tensor) mini-batch outputs
+
+        Returns
+        -------
+        batch loss (float)
+        """
+        batch_loss = self.training_step(batch=(x_batch, y_batch))
+        batch_loss.backward()
+        self.optimizer.step()
+        return batch_loss.item()
+
+    def extract_input_outputs_set_device(self, batch: list) -> Tuple:
+        """
+        Construct inputs and outputs from a batch and move them onto the selected device
+
+        Arguments
+        ----------
+        batch: (list) mini-batch on CPU
+
+        Returns
+        -------
+        (tuple) mini-batch on the selected device
+        """
+        # (1) NegSample: x is a triple, y is a float.
+        if len(batch) == 2:
+            x_batch, y_batch = batch
+            return x_batch.to(self.device), y_batch.to(self.device)
+        # (2) Batch with sampled output indices (e.g. KvsSample): inputs, selected output indices, labels.
+        elif len(batch) == 3:
+            x_batch, y_idx_batch, y_batch = batch
+            x_batch, y_idx_batch, y_batch = x_batch.to(self.device), y_idx_batch.to(self.device), y_batch.to(
+                self.device)
+            return (x_batch, y_idx_batch), y_batch
+        else:
+            raise ValueError(f'Unexpected batch of length {len(batch)}.')
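The two batch layouts dispatched above look roughly as follows (dummy tensors; the exact shapes depend on the scoring technique and are an assumption here):

    import torch

    # len(batch) == 2, e.g. NegSample: index triples and one label per triple.
    neg_sample_batch = (torch.randint(0, 10, (4, 3)),   # (h, r, t) index triples
                        torch.rand(4))                  # float label per triple

    # len(batch) == 3, e.g. KvsSample: (h, r) inputs, sampled entity indices, labels.
    kvs_sample_batch = (torch.randint(0, 10, (4, 2)),   # (h, r) pairs
                        torch.randint(0, 10, (4, 5)),   # selected output indices
                        torch.rand(4, 5))               # label per selected index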
\ No newline at end of file
diff --git a/_modules/dicee/trainer/torch_trainer_ddp.html b/_modules/dicee/trainer/torch_trainer_ddp.html
new file mode 100644
index 00000000..b92a2f7a
--- /dev/null
+++ b/_modules/dicee/trainer/torch_trainer_ddp.html
@@ -0,0 +1,515 @@
+dicee.trainer.torch_trainer_ddp - DICE Embeddings 0.0.6 documentation

Source code for dicee.trainer.torch_trainer_ddp

+import os
+import torch
+import time
+from torch.nn.parallel import DistributedDataParallel as DDP
+
+from dicee.abstracts import AbstractTrainer
+from dicee.static_funcs_training import efficient_zero_grad
+from torch.utils.data import DataLoader
+
+
+# DDP with gradient accumulation: https://gist.github.com/mcarilli/bf013d2d2f4b4dd21ade30c9b52d5e2e
+
+
+
+
+
+class TorchDDPTrainer(AbstractTrainer):
+    """
+    A Trainer based on torch.nn.parallel.DistributedDataParallel
+
+    Arguments
+    ----------
+    args: ?
+
+    callbacks: list of Abstract callback instances
+    """
+
+    def __init__(self, args, callbacks):
+        super().__init__(args, callbacks)
+    def fit(self, *args, **kwargs):
+        """ Train the model """
+        assert len(args) == 1
+        model, = args
+        # (1) Run the callbacks before training starts.
+        self.on_fit_start(self, model)
+        # (2) Set up DDP.
+        torch.distributed.init_process_group(backend="nccl")
+        train_dataset_loader = kwargs['train_dataloaders']
+        # (3) Create the data loader with a distributed sampler.
+        train_dataset_loader = DataLoader(train_dataset_loader.dataset, batch_size=self.attributes.batch_size,
+                                          pin_memory=True, shuffle=False, num_workers=self.attributes.num_core,
+                                          persistent_workers=False,
+                                          collate_fn=kwargs['train_dataloaders'].dataset.collate_fn,
+                                          sampler=torch.utils.data.distributed.DistributedSampler(
+                                              train_dataset_loader.dataset))
+        # (4) Initialize the optimizer.
+        optimizer = model.configure_optimizers()
+        # (5) Start the NodeTrainer and tear down the process group afterwards.
+        NodeTrainer(model, train_dataset_loader, optimizer, self.callbacks, self.attributes.num_epochs).train()
+        torch.distributed.destroy_process_group()
+        self.on_fit_end(self, model)
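A minimal, self-contained DDP skeleton mirroring the setup above (init_process_group, DistributedSampler, DDP wrap, cleanup). This is a sketch with a toy model, meant to be launched with torchrun, e.g. `torchrun --nproc_per_node=2 sketch.py`:

    import os
    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torch.utils.data import DataLoader, TensorDataset
    from torch.utils.data.distributed import DistributedSampler

    def main():
        dist.init_process_group(backend="nccl")        # torchrun provides RANK/WORLD_SIZE/LOCAL_RANK
        local_rank = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(local_rank)
        model = DDP(torch.nn.Linear(4, 1).to(local_rank), device_ids=[local_rank])
        dataset = TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
        loader = DataLoader(dataset, batch_size=8, shuffle=False,
                            sampler=DistributedSampler(dataset))   # shards the data across ranks
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        for epoch in range(2):
            loader.sampler.set_epoch(epoch)            # reshuffle the shards differently each epoch
            for x, y in loader:
                optimizer.zero_grad()
                loss = torch.nn.functional.mse_loss(model(x.to(local_rank)), y.to(local_rank))
                loss.backward()                        # DDP synchronizes gradients here
                optimizer.step()
        dist.destroy_process_group()

    if __name__ == "__main__":
        main()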
+
+
+class NodeTrainer:
+    def __init__(self,
+                 model: torch.nn.Module,
+                 train_dataset_loader: DataLoader,
+                 optimizer: torch.optim.Optimizer,
+                 callbacks,
+                 num_epochs: int) -> None:
+        # (1) Local and global ranks, set by torchrun.
+        self.local_rank = int(os.environ["LOCAL_RANK"])
+        self.global_rank = int(os.environ["RANK"])
+        # (2) Move the model to the local device. (TODO: check whether this is unnecessary, as the model is wrapped with DDP below.)
+        self.model = model.to(self.local_rank)
+        self.train_dataset_loader = train_dataset_loader
+        self.loss_func = self.model.loss
+        self.optimizer = optimizer
+        self.callbacks = callbacks
+        # (3) Wrap the model with DDP(), along with the GPU ID that the model lives on.
+        self.model = DDP(model, device_ids=[self.local_rank])
+        self.num_epochs = num_epochs
+        # NOTE: print_peak_memory is assumed to be defined or imported elsewhere; it is not imported above.
+        print_peak_memory("Max memory allocated after creating DDP on local_rank:", self.local_rank)
+        print(f'Global Rank {self.global_rank}\t Local Rank:{self.local_rank}')
+        print(self.model)
+        print(self.optimizer)
+        print(f'Global:{self.global_rank}'
+              f'|Local:{self.local_rank}'
+              f'|NumOfDataPoints:{len(self.train_dataset_loader.dataset)}'
+              f'|NumOfEpochs:{self.num_epochs}'
+              f'|LearningRate:{self.model.module.learning_rate}'
+              f'|BatchSize:{self.train_dataset_loader.batch_size}'
+              f'|EpochBatchsize:{len(self.train_dataset_loader)}')
+        self.loss_history = []
+
+    def _load_snapshot(self, snapshot_path):
+        raise NotImplementedError
+
+    def _run_batch(self, source, targets):
+        self.optimizer.zero_grad()
+        output = self.model(source)
+        loss = self.loss_func(output, targets)
+        batch_loss = loss.item()
+        loss.backward()
+        self.optimizer.step()
+        return batch_loss
+    def extract_input_outputs(self, z: list):
+        if len(z) == 2:
+            x_batch, y_batch = z
+            return x_batch.to(self.local_rank), y_batch.to(self.local_rank)
+        elif len(z) == 3:
+            x_batch, y_idx_batch, y_batch = z
+            x_batch, y_idx_batch, y_batch = x_batch.to(self.local_rank), y_idx_batch.to(self.local_rank), y_batch.to(
+                self.local_rank)
+            return (x_batch, y_idx_batch), y_batch
+        else:
+            raise ValueError('Unexpected batch shape..')
+
+    def _run_epoch(self, epoch):
+        self.train_dataset_loader.sampler.set_epoch(epoch)
+        epoch_loss = 0
+        i = 0
+        construct_mini_batch_time = None
+        for i, z in enumerate(self.train_dataset_loader):
+            source, targets = self.extract_input_outputs(z)
+            start_time = time.time()
+            if construct_mini_batch_time:
+                construct_mini_batch_time = start_time - construct_mini_batch_time
+            batch_loss = self._run_batch(source, targets)
+            epoch_loss += batch_loss
+            if True:  # self.local_rank == self.global_rank == 0:
+                if construct_mini_batch_time:
+                    print(
+                        f"Global:{self.global_rank}"
+                        f"|Local:{self.local_rank}"
+                        f"|Epoch:{epoch + 1}"
+                        f"|Batch:{i + 1}"
+                        f"|Loss:{batch_loss}"
+                        f"|ForwardBackwardUpdate:{(time.time() - start_time):.2f}sec|"
+                        f"BatchConst.:{construct_mini_batch_time:.2f}sec")
+                else:
+                    print(
+                        f"Global:{self.global_rank}"
+                        f"|Local:{self.local_rank}"
+                        f"|Epoch:{epoch + 1}"
+                        f"|Batch:{i + 1}"
+                        f"|Loss:{batch_loss}"
+                        f"|ForwardBackwardUpdate:{(time.time() - start_time):.2f}secs")
+            construct_mini_batch_time = time.time()
+        return epoch_loss / (i + 1)
+    def train(self):
+        for epoch in range(self.num_epochs):
+            start_time = time.time()
+            epoch_loss = self._run_epoch(epoch)
+            print(f"Epoch:{epoch + 1} | Loss:{epoch_loss:.8f} | Runtime:{(time.time() - start_time) / 60:.3f}mins")
+            if True:  # self.local_rank == self.global_rank == 0:
+                self.model.module.loss_history.append(epoch_loss)
+                for c in self.callbacks:
+                    c.on_train_epoch_end(None, self.model.module)
+
+
+class DDPTrainer:
+    def __init__(self,
+                 model: torch.nn.Module,
+                 train_dataset_loader: DataLoader,
+                 optimizer: torch.optim.Optimizer,
+                 gpu_id: int, callbacks, num_epochs) -> None:
+        self.gpu_id = gpu_id
+        self.model = model.to(gpu_id)
+        self.train_dataset_loader = train_dataset_loader
+        self.loss_func = self.model.loss
+        self.optimizer = optimizer
+        self.callbacks = callbacks
+        # (1) Wrap the model with DDP(), along with the GPU ID that the model lives on.
+        self.model = DDP(model, device_ids=[gpu_id])
+        self.num_epochs = num_epochs
+        print_peak_memory("Max memory allocated after creating DDP:", gpu_id)
+        print(f'GPU:{self.gpu_id}')
+        print(self.model)
+        print(self.optimizer)
+        print(
+            f'NumOfDataPoints:{len(self.train_dataset_loader.dataset)}'
+            f'|NumOfEpochs:{self.num_epochs}'
+            f'|LearningRate:{self.model.module.learning_rate}'
+            f'|BatchSize:{self.train_dataset_loader.batch_size}'
+            f'|EpochBatchsize:{len(self.train_dataset_loader)}')
+        self.loss_history = []
+
+    def _run_batch(self, source, targets):
+        # (1) Zero the gradients without materializing zero tensors.
+        # self.optimizer.zero_grad()
+        efficient_zero_grad(self.model)
+        output = self.model(source)
+        loss = self.loss_func(output, targets)
+        batch_loss = loss.item()
+        loss.backward()
+        self.optimizer.step()
+        # @TODO: Tips to decrease memory usage:
+        # https://github.com/pytorch/pytorch/issues/13246#issuecomment-905703662
+        # torch.cuda.empty_cache()
+        return batch_loss
+    def extract_input_outputs(self, z: list):
+        if len(z) == 2:
+            x_batch, y_batch = z
+            return x_batch.to(self.gpu_id), y_batch.to(self.gpu_id)
+        elif len(z) == 3:
+            x_batch, y_idx_batch, y_batch = z
+            x_batch, y_idx_batch, y_batch = x_batch.to(self.gpu_id), y_idx_batch.to(self.gpu_id), y_batch.to(
+                self.gpu_id)
+            return (x_batch, y_idx_batch), y_batch
+        else:
+            raise ValueError('Unexpected batch shape..')
+
+    def _run_epoch(self, epoch):
+        self.train_dataset_loader.sampler.set_epoch(epoch)
+        epoch_loss = 0
+        i = 0
+        construct_mini_batch_time = None
+        for i, z in enumerate(self.train_dataset_loader):
+            source, targets = self.extract_input_outputs(z)
+            start_time = time.time()
+            if construct_mini_batch_time:
+                construct_mini_batch_time = start_time - construct_mini_batch_time
+            batch_loss = self._run_batch(source, targets)
+            epoch_loss += batch_loss
+            if self.gpu_id == 0:
+                if construct_mini_batch_time:
+                    print(
+                        f"Epoch:{epoch + 1}|Batch:{i + 1}"
+                        f"|Loss:{batch_loss}"
+                        f"|ForwardBackwardUpdate:{(time.time() - start_time):.2f}sec"
+                        f"|BatchConst.:{construct_mini_batch_time:.2f}sec")
+                else:
+                    print(
+                        f"Epoch:{epoch + 1}|Batch:{i + 1}"
+                        f"|Loss:{batch_loss}"
+                        f"|ForwardBackwardUpdate:{(time.time() - start_time):.2f}secs")
+            construct_mini_batch_time = time.time()
+        return epoch_loss / (i + 1)
+    def train(self):
+        for epoch in range(self.num_epochs):
+            start_time = time.time()
+            epoch_loss = self._run_epoch(epoch)
+            if self.gpu_id == 0:
+                print(f"Epoch:{epoch + 1} | Loss:{epoch_loss:.8f} | Runtime:{(time.time() - start_time) / 60:.3f}mins")
+                self.model.module.loss_history.append(epoch_loss)
+                for c in self.callbacks:
+                    c.on_train_epoch_end(None, self.model.module)
\ No newline at end of file
diff --git a/_modules/index.html b/_modules/index.html
new file mode 100644
index 00000000..fb188c67
--- /dev/null
+++ b/_modules/index.html
@@ -0,0 +1,256 @@
+Overview: module code - DICE Embeddings 0.0.6 documentation
+ + + + + \ No newline at end of file diff --git a/_sources/dicee.models.rst.txt b/_sources/dicee.models.rst.txt new file mode 100644 index 00000000..ebc3b47e --- /dev/null +++ b/_sources/dicee.models.rst.txt @@ -0,0 +1,85 @@ +dicee.models package +==================== + +Submodules +---------- + +dicee.models.base\_model module +------------------------------- + +.. automodule:: dicee.models.base_model + :members: + :undoc-members: + :show-inheritance: + +dicee.models.clifford module +---------------------------- + +.. automodule:: dicee.models.clifford + :members: + :undoc-members: + :show-inheritance: + +dicee.models.complex module +--------------------------- + +.. automodule:: dicee.models.complex + :members: + :undoc-members: + :show-inheritance: + +dicee.models.function\_space module +----------------------------------- + +.. automodule:: dicee.models.function_space + :members: + :undoc-members: + :show-inheritance: + +dicee.models.octonion module +---------------------------- + +.. automodule:: dicee.models.octonion + :members: + :undoc-members: + :show-inheritance: + +dicee.models.pykeen\_models module +---------------------------------- + +.. automodule:: dicee.models.pykeen_models + :members: + :undoc-members: + :show-inheritance: + +dicee.models.quaternion module +------------------------------ + +.. automodule:: dicee.models.quaternion + :members: + :undoc-members: + :show-inheritance: + +dicee.models.real module +------------------------ + +.. automodule:: dicee.models.real + :members: + :undoc-members: + :show-inheritance: + +dicee.models.static\_funcs module +--------------------------------- + +.. automodule:: dicee.models.static_funcs + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: dicee.models + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/dicee.read_preprocess_save_load_kg.rst.txt b/_sources/dicee.read_preprocess_save_load_kg.rst.txt new file mode 100644 index 00000000..de13b9dc --- /dev/null +++ b/_sources/dicee.read_preprocess_save_load_kg.rst.txt @@ -0,0 +1,45 @@ +dicee.read\_preprocess\_save\_load\_kg package +============================================== + +Submodules +---------- + +dicee.read\_preprocess\_save\_load\_kg.preprocess module +-------------------------------------------------------- + +.. automodule:: dicee.read_preprocess_save_load_kg.preprocess + :members: + :undoc-members: + :show-inheritance: + +dicee.read\_preprocess\_save\_load\_kg.read\_from\_disk module +-------------------------------------------------------------- + +.. automodule:: dicee.read_preprocess_save_load_kg.read_from_disk + :members: + :undoc-members: + :show-inheritance: + +dicee.read\_preprocess\_save\_load\_kg.save\_load\_disk module +-------------------------------------------------------------- + +.. automodule:: dicee.read_preprocess_save_load_kg.save_load_disk + :members: + :undoc-members: + :show-inheritance: + +dicee.read\_preprocess\_save\_load\_kg.util module +-------------------------------------------------- + +.. automodule:: dicee.read_preprocess_save_load_kg.util + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: dicee.read_preprocess_save_load_kg + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/dicee.rst.txt b/_sources/dicee.rst.txt new file mode 100644 index 00000000..4fb6495a --- /dev/null +++ b/_sources/dicee.rst.txt @@ -0,0 +1,151 @@ +dicee package +============= + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + dicee.models + dicee.read_preprocess_save_load_kg + dicee.trainer + +Submodules +---------- + +dicee.abstracts module +---------------------- + +.. automodule:: dicee.abstracts + :members: + :undoc-members: + :show-inheritance: + +dicee.callbacks module +---------------------- + +.. automodule:: dicee.callbacks + :members: + :undoc-members: + :show-inheritance: + +dicee.config module +------------------- + +.. automodule:: dicee.config + :members: + :undoc-members: + :show-inheritance: + +dicee.dataset\_classes module +----------------------------- + +.. automodule:: dicee.dataset_classes + :members: + :undoc-members: + :show-inheritance: + +dicee.evaluator module +---------------------- + +.. automodule:: dicee.evaluator + :members: + :undoc-members: + :show-inheritance: + +dicee.executer module +--------------------- + +.. automodule:: dicee.executer + :members: + :undoc-members: + :show-inheritance: + +dicee.helper\_classes module +---------------------------- + +.. automodule:: dicee.helper_classes + :members: + :undoc-members: + :show-inheritance: + +dicee.knowledge\_graph module +----------------------------- + +.. automodule:: dicee.knowledge_graph + :members: + :undoc-members: + :show-inheritance: + +dicee.knowledge\_graph\_embeddings module +----------------------------------------- + +.. automodule:: dicee.knowledge_graph_embeddings + :members: + :undoc-members: + :show-inheritance: + +dicee.query\_generator module +----------------------------- + +.. automodule:: dicee.query_generator + :members: + :undoc-members: + :show-inheritance: + +dicee.run module +---------------- + +.. automodule:: dicee.run + :members: + :undoc-members: + :show-inheritance: + +dicee.sanity\_checkers module +----------------------------- + +.. automodule:: dicee.sanity_checkers + :members: + :undoc-members: + :show-inheritance: + +dicee.static\_funcs module +-------------------------- + +.. automodule:: dicee.static_funcs + :members: + :undoc-members: + :show-inheritance: + +dicee.static\_funcs\_training module +------------------------------------ + +.. automodule:: dicee.static_funcs_training + :members: + :undoc-members: + :show-inheritance: + +dicee.static\_preprocess\_funcs module +-------------------------------------- + +.. automodule:: dicee.static_preprocess_funcs + :members: + :undoc-members: + :show-inheritance: + +dicee.types module +------------------ + +.. automodule:: dicee.types + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: dicee + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/dicee.trainer.rst.txt b/_sources/dicee.trainer.rst.txt new file mode 100644 index 00000000..6a21575b --- /dev/null +++ b/_sources/dicee.trainer.rst.txt @@ -0,0 +1,37 @@ +dicee.trainer package +===================== + +Submodules +---------- + +dicee.trainer.dice\_trainer module +---------------------------------- + +.. automodule:: dicee.trainer.dice_trainer + :members: + :undoc-members: + :show-inheritance: + +dicee.trainer.torch\_trainer module +----------------------------------- + +.. automodule:: dicee.trainer.torch_trainer + :members: + :undoc-members: + :show-inheritance: + +dicee.trainer.torch\_trainer\_ddp module +---------------------------------------- + +.. automodule:: dicee.trainer.torch_trainer_ddp + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: dicee.trainer + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt new file mode 100644 index 00000000..da19dcaa --- /dev/null +++ b/_sources/index.rst.txt @@ -0,0 +1,62 @@ +.. DICE Embeddings documentation master file, created by + sphinx-quickstart on Mon Aug 14 13:07:46 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to DICE Embeddings! +=========================================== + +`DICE Embeddings `_: Hardware-agnostic Framework for Large-scale Knowledge Graph Embeddings: +======= + + + +.. warning:: + + Train embedding models in multi-node, multi-GPUs, distributed data parallel or model parallel without expert knowledge! + + + .. code-block:: bash + + // 1 CPU + (dicee) $ python -m dicee.run --path_dataset_folder KGs/UMLS + // 10 CPU + (dicee) $ python -m dicee.run --path_dataset_folder KGs/UMLS --num_core 10 + // Distributed Data Parallel (DDP) with all GPUs + (dicee) $ python -m dicee.run --trainer PL --accelerator gpu --strategy ddp --path_dataset_folder KGs/UMLS + // Model Parallel with all GPUs and low precision + (dicee) $ python -m dicee.run --trainer PL --accelerator gpu --strategy deepspeed_stage_3 --path_dataset_folder KGs/UMLS --precision 16 + // DDP with all GPUs on two nodes (felis and nebula): + (dicee) cdemir@felis $ torchrun --nnodes 2 --nproc_per_node=gpu --node_rank 0 --rdzv_id 455 --rdzv_backend c10d --rdzv_endpoint=nebula -m dicee.main --trainer torchDDP --path_dataset_folder KGs/UMLS + (dicee) cdemir@nebula $ torchrun --nnodes 2 --nproc_per_node=gpu --node_rank 1 --rdzv_id 455 --rdzv_backend c10d --rdzv_endpoint=nebula -m dicee.main --trainer torchDDP --path_dataset_folder KGs/UMLS + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + +Usage +------- + +.. code-block:: console + + $ git clone https://github.com/dice-group/dice-embeddings.git + + $ conda create -n dicee python=3.9 --no-default-packages && conda activate dicee + + (dice) $ pip3 install -r requirements.txt + +or + +.. code-block:: console + + (dice) $ pip install dicee + + + + +Indices and tables +------- + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/_sources/modules.rst.txt b/_sources/modules.rst.txt new file mode 100644 index 00000000..160fa361 --- /dev/null +++ b/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +dicee +===== + +.. toctree:: + :maxdepth: 4 + + dicee diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 00000000..30fee9d0 --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/debug.css b/_static/debug.css new file mode 100644 index 00000000..74d4aec3 --- /dev/null +++ b/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. +*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 00000000..d06a71d7 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 00000000..a8a18f58 --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + 
VERSION: '0.0.6', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/_static/file.png differ diff --git a/_static/language_data.js b/_static/language_data.js new file mode 100644 index 00000000..250f5665 --- /dev/null +++ b/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 00000000..c2e07c71 --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,258 @@ +.highlight pre { line-height: 125%; } +.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos.special { 
color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #8f5902; font-style: italic } /* Comment */ +.highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */ +.highlight .g { color: #000000 } /* Generic */ +.highlight .k { color: #204a87; font-weight: bold } /* Keyword */ +.highlight .l { color: #000000 } /* Literal */ +.highlight .n { color: #000000 } /* Name */ +.highlight .o { color: #ce5c00; font-weight: bold } /* Operator */ +.highlight .x { color: #000000 } /* Other */ +.highlight .p { color: #000000; font-weight: bold } /* Punctuation */ +.highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #8f5902; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #8f5902; font-style: italic } /* Comment.Preproc */ +.highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #a40000 } /* Generic.Deleted */ +.highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */ +.highlight .ges { color: #000000; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #ef2929 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #000000; font-style: italic } /* Generic.Output */ +.highlight .gp { color: #8f5902 } /* Generic.Prompt */ +.highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */ +.highlight .kc { color: #204a87; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #204a87; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #204a87; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #204a87; font-weight: bold } /* Keyword.Pseudo */ +.highlight .kr { color: #204a87; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #204a87; font-weight: bold } /* Keyword.Type */ +.highlight .ld { color: #000000 } /* Literal.Date */ +.highlight .m { color: #0000cf; font-weight: bold } /* Literal.Number */ +.highlight .s { color: #4e9a06 } /* Literal.String */ +.highlight .na { color: #c4a000 } /* Name.Attribute */ +.highlight .nb { color: #204a87 } /* Name.Builtin */ +.highlight .nc { color: #000000 } /* Name.Class */ +.highlight .no { color: #000000 } /* Name.Constant */ +.highlight .nd { color: #5c35cc; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #ce5c00 } /* Name.Entity */ +.highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #000000 } /* Name.Function */ +.highlight .nl { color: #f57900 } /* Name.Label */ +.highlight .nn { color: #000000 } /* Name.Namespace */ +.highlight .nx { color: #000000 } /* Name.Other */ +.highlight .py { color: #000000 } /* Name.Property */ +.highlight .nt { color: #204a87; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #000000 } /* Name.Variable */ +.highlight .ow { color: #204a87; font-weight: bold } /* Operator.Word */ +.highlight .pm { color: #000000; font-weight: bold } /* 
Punctuation.Marker */ +.highlight .w { color: #f8f8f8 } /* Text.Whitespace */ +.highlight .mb { color: #0000cf; font-weight: bold } /* Literal.Number.Bin */ +.highlight .mf { color: #0000cf; font-weight: bold } /* Literal.Number.Float */ +.highlight .mh { color: #0000cf; font-weight: bold } /* Literal.Number.Hex */ +.highlight .mi { color: #0000cf; font-weight: bold } /* Literal.Number.Integer */ +.highlight .mo { color: #0000cf; font-weight: bold } /* Literal.Number.Oct */ +.highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ +.highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ +.highlight .sc { color: #4e9a06 } /* Literal.String.Char */ +.highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ +.highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ +.highlight .se { color: #4e9a06 } /* Literal.String.Escape */ +.highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ +.highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ +.highlight .sx { color: #4e9a06 } /* Literal.String.Other */ +.highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ +.highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ +.highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ +.highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #000000 } /* Name.Function.Magic */ +.highlight .vc { color: #000000 } /* Name.Variable.Class */ +.highlight .vg { color: #000000 } /* Name.Variable.Global */ +.highlight .vi { color: #000000 } /* Name.Variable.Instance */ +.highlight .vm { color: #000000 } /* Name.Variable.Magic */ +.highlight .il { color: #0000cf; font-weight: bold } /* Literal.Number.Integer.Long */ +@media not print { +body[data-theme="dark"] .highlight pre { line-height: 125%; } +body[data-theme="dark"] .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight .hll { background-color: #404040 } +body[data-theme="dark"] .highlight { background: #202020; color: #d0d0d0 } +body[data-theme="dark"] .highlight .c { color: #ababab; font-style: italic } /* Comment */ +body[data-theme="dark"] .highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ +body[data-theme="dark"] .highlight .esc { color: #d0d0d0 } /* Escape */ +body[data-theme="dark"] .highlight .g { color: #d0d0d0 } /* Generic */ +body[data-theme="dark"] .highlight .k { color: #6ebf26; font-weight: bold } /* Keyword */ +body[data-theme="dark"] .highlight .l { color: #d0d0d0 } /* Literal */ +body[data-theme="dark"] .highlight .n { color: #d0d0d0 } /* Name */ +body[data-theme="dark"] .highlight .o { color: #d0d0d0 } /* Operator */ +body[data-theme="dark"] .highlight .x { color: #d0d0d0 } /* Other */ +body[data-theme="dark"] .highlight .p { color: #d0d0d0 } /* Punctuation */ +body[data-theme="dark"] .highlight .ch { color: #ababab; font-style: italic } /* Comment.Hashbang */ +body[data-theme="dark"] .highlight .cm { color: #ababab; font-style: italic } /* 
Comment.Multiline */ +body[data-theme="dark"] .highlight .cp { color: #ff3a3a; font-weight: bold } /* Comment.Preproc */ +body[data-theme="dark"] .highlight .cpf { color: #ababab; font-style: italic } /* Comment.PreprocFile */ +body[data-theme="dark"] .highlight .c1 { color: #ababab; font-style: italic } /* Comment.Single */ +body[data-theme="dark"] .highlight .cs { color: #e50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body[data-theme="dark"] .highlight .gd { color: #d22323 } /* Generic.Deleted */ +body[data-theme="dark"] .highlight .ge { color: #d0d0d0; font-style: italic } /* Generic.Emph */ +body[data-theme="dark"] .highlight .ges { color: #d0d0d0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body[data-theme="dark"] .highlight .gr { color: #d22323 } /* Generic.Error */ +body[data-theme="dark"] .highlight .gh { color: #ffffff; font-weight: bold } /* Generic.Heading */ +body[data-theme="dark"] .highlight .gi { color: #589819 } /* Generic.Inserted */ +body[data-theme="dark"] .highlight .go { color: #cccccc } /* Generic.Output */ +body[data-theme="dark"] .highlight .gp { color: #aaaaaa } /* Generic.Prompt */ +body[data-theme="dark"] .highlight .gs { color: #d0d0d0; font-weight: bold } /* Generic.Strong */ +body[data-theme="dark"] .highlight .gu { color: #ffffff; text-decoration: underline } /* Generic.Subheading */ +body[data-theme="dark"] .highlight .gt { color: #d22323 } /* Generic.Traceback */ +body[data-theme="dark"] .highlight .kc { color: #6ebf26; font-weight: bold } /* Keyword.Constant */ +body[data-theme="dark"] .highlight .kd { color: #6ebf26; font-weight: bold } /* Keyword.Declaration */ +body[data-theme="dark"] .highlight .kn { color: #6ebf26; font-weight: bold } /* Keyword.Namespace */ +body[data-theme="dark"] .highlight .kp { color: #6ebf26 } /* Keyword.Pseudo */ +body[data-theme="dark"] .highlight .kr { color: #6ebf26; font-weight: bold } /* Keyword.Reserved */ +body[data-theme="dark"] .highlight .kt { color: #6ebf26; font-weight: bold } /* Keyword.Type */ +body[data-theme="dark"] .highlight .ld { color: #d0d0d0 } /* Literal.Date */ +body[data-theme="dark"] .highlight .m { color: #51b2fd } /* Literal.Number */ +body[data-theme="dark"] .highlight .s { color: #ed9d13 } /* Literal.String */ +body[data-theme="dark"] .highlight .na { color: #bbbbbb } /* Name.Attribute */ +body[data-theme="dark"] .highlight .nb { color: #2fbccd } /* Name.Builtin */ +body[data-theme="dark"] .highlight .nc { color: #71adff; text-decoration: underline } /* Name.Class */ +body[data-theme="dark"] .highlight .no { color: #40ffff } /* Name.Constant */ +body[data-theme="dark"] .highlight .nd { color: #ffa500 } /* Name.Decorator */ +body[data-theme="dark"] .highlight .ni { color: #d0d0d0 } /* Name.Entity */ +body[data-theme="dark"] .highlight .ne { color: #bbbbbb } /* Name.Exception */ +body[data-theme="dark"] .highlight .nf { color: #71adff } /* Name.Function */ +body[data-theme="dark"] .highlight .nl { color: #d0d0d0 } /* Name.Label */ +body[data-theme="dark"] .highlight .nn { color: #71adff; text-decoration: underline } /* Name.Namespace */ +body[data-theme="dark"] .highlight .nx { color: #d0d0d0 } /* Name.Other */ +body[data-theme="dark"] .highlight .py { color: #d0d0d0 } /* Name.Property */ +body[data-theme="dark"] .highlight .nt { color: #6ebf26; font-weight: bold } /* Name.Tag */ +body[data-theme="dark"] .highlight .nv { color: #40ffff } /* Name.Variable */ +body[data-theme="dark"] .highlight .ow { color: #6ebf26; font-weight: bold } /* 
Operator.Word */ +body[data-theme="dark"] .highlight .pm { color: #d0d0d0 } /* Punctuation.Marker */ +body[data-theme="dark"] .highlight .w { color: #666666 } /* Text.Whitespace */ +body[data-theme="dark"] .highlight .mb { color: #51b2fd } /* Literal.Number.Bin */ +body[data-theme="dark"] .highlight .mf { color: #51b2fd } /* Literal.Number.Float */ +body[data-theme="dark"] .highlight .mh { color: #51b2fd } /* Literal.Number.Hex */ +body[data-theme="dark"] .highlight .mi { color: #51b2fd } /* Literal.Number.Integer */ +body[data-theme="dark"] .highlight .mo { color: #51b2fd } /* Literal.Number.Oct */ +body[data-theme="dark"] .highlight .sa { color: #ed9d13 } /* Literal.String.Affix */ +body[data-theme="dark"] .highlight .sb { color: #ed9d13 } /* Literal.String.Backtick */ +body[data-theme="dark"] .highlight .sc { color: #ed9d13 } /* Literal.String.Char */ +body[data-theme="dark"] .highlight .dl { color: #ed9d13 } /* Literal.String.Delimiter */ +body[data-theme="dark"] .highlight .sd { color: #ed9d13 } /* Literal.String.Doc */ +body[data-theme="dark"] .highlight .s2 { color: #ed9d13 } /* Literal.String.Double */ +body[data-theme="dark"] .highlight .se { color: #ed9d13 } /* Literal.String.Escape */ +body[data-theme="dark"] .highlight .sh { color: #ed9d13 } /* Literal.String.Heredoc */ +body[data-theme="dark"] .highlight .si { color: #ed9d13 } /* Literal.String.Interpol */ +body[data-theme="dark"] .highlight .sx { color: #ffa500 } /* Literal.String.Other */ +body[data-theme="dark"] .highlight .sr { color: #ed9d13 } /* Literal.String.Regex */ +body[data-theme="dark"] .highlight .s1 { color: #ed9d13 } /* Literal.String.Single */ +body[data-theme="dark"] .highlight .ss { color: #ed9d13 } /* Literal.String.Symbol */ +body[data-theme="dark"] .highlight .bp { color: #2fbccd } /* Name.Builtin.Pseudo */ +body[data-theme="dark"] .highlight .fm { color: #71adff } /* Name.Function.Magic */ +body[data-theme="dark"] .highlight .vc { color: #40ffff } /* Name.Variable.Class */ +body[data-theme="dark"] .highlight .vg { color: #40ffff } /* Name.Variable.Global */ +body[data-theme="dark"] .highlight .vi { color: #40ffff } /* Name.Variable.Instance */ +body[data-theme="dark"] .highlight .vm { color: #40ffff } /* Name.Variable.Magic */ +body[data-theme="dark"] .highlight .il { color: #51b2fd } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +body:not([data-theme="light"]) .highlight pre { line-height: 125%; } +body:not([data-theme="light"]) .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight .hll { background-color: #404040 } +body:not([data-theme="light"]) .highlight { background: #202020; color: #d0d0d0 } +body:not([data-theme="light"]) .highlight .c { color: #ababab; font-style: italic } /* Comment */ +body:not([data-theme="light"]) .highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ +body:not([data-theme="light"]) .highlight .esc { color: #d0d0d0 } /* Escape */ +body:not([data-theme="light"]) .highlight .g { color: 
#d0d0d0 } /* Generic */ +body:not([data-theme="light"]) .highlight .k { color: #6ebf26; font-weight: bold } /* Keyword */ +body:not([data-theme="light"]) .highlight .l { color: #d0d0d0 } /* Literal */ +body:not([data-theme="light"]) .highlight .n { color: #d0d0d0 } /* Name */ +body:not([data-theme="light"]) .highlight .o { color: #d0d0d0 } /* Operator */ +body:not([data-theme="light"]) .highlight .x { color: #d0d0d0 } /* Other */ +body:not([data-theme="light"]) .highlight .p { color: #d0d0d0 } /* Punctuation */ +body:not([data-theme="light"]) .highlight .ch { color: #ababab; font-style: italic } /* Comment.Hashbang */ +body:not([data-theme="light"]) .highlight .cm { color: #ababab; font-style: italic } /* Comment.Multiline */ +body:not([data-theme="light"]) .highlight .cp { color: #ff3a3a; font-weight: bold } /* Comment.Preproc */ +body:not([data-theme="light"]) .highlight .cpf { color: #ababab; font-style: italic } /* Comment.PreprocFile */ +body:not([data-theme="light"]) .highlight .c1 { color: #ababab; font-style: italic } /* Comment.Single */ +body:not([data-theme="light"]) .highlight .cs { color: #e50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body:not([data-theme="light"]) .highlight .gd { color: #d22323 } /* Generic.Deleted */ +body:not([data-theme="light"]) .highlight .ge { color: #d0d0d0; font-style: italic } /* Generic.Emph */ +body:not([data-theme="light"]) .highlight .ges { color: #d0d0d0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body:not([data-theme="light"]) .highlight .gr { color: #d22323 } /* Generic.Error */ +body:not([data-theme="light"]) .highlight .gh { color: #ffffff; font-weight: bold } /* Generic.Heading */ +body:not([data-theme="light"]) .highlight .gi { color: #589819 } /* Generic.Inserted */ +body:not([data-theme="light"]) .highlight .go { color: #cccccc } /* Generic.Output */ +body:not([data-theme="light"]) .highlight .gp { color: #aaaaaa } /* Generic.Prompt */ +body:not([data-theme="light"]) .highlight .gs { color: #d0d0d0; font-weight: bold } /* Generic.Strong */ +body:not([data-theme="light"]) .highlight .gu { color: #ffffff; text-decoration: underline } /* Generic.Subheading */ +body:not([data-theme="light"]) .highlight .gt { color: #d22323 } /* Generic.Traceback */ +body:not([data-theme="light"]) .highlight .kc { color: #6ebf26; font-weight: bold } /* Keyword.Constant */ +body:not([data-theme="light"]) .highlight .kd { color: #6ebf26; font-weight: bold } /* Keyword.Declaration */ +body:not([data-theme="light"]) .highlight .kn { color: #6ebf26; font-weight: bold } /* Keyword.Namespace */ +body:not([data-theme="light"]) .highlight .kp { color: #6ebf26 } /* Keyword.Pseudo */ +body:not([data-theme="light"]) .highlight .kr { color: #6ebf26; font-weight: bold } /* Keyword.Reserved */ +body:not([data-theme="light"]) .highlight .kt { color: #6ebf26; font-weight: bold } /* Keyword.Type */ +body:not([data-theme="light"]) .highlight .ld { color: #d0d0d0 } /* Literal.Date */ +body:not([data-theme="light"]) .highlight .m { color: #51b2fd } /* Literal.Number */ +body:not([data-theme="light"]) .highlight .s { color: #ed9d13 } /* Literal.String */ +body:not([data-theme="light"]) .highlight .na { color: #bbbbbb } /* Name.Attribute */ +body:not([data-theme="light"]) .highlight .nb { color: #2fbccd } /* Name.Builtin */ +body:not([data-theme="light"]) .highlight .nc { color: #71adff; text-decoration: underline } /* Name.Class */ +body:not([data-theme="light"]) .highlight .no { color: #40ffff } /* Name.Constant */ 
+body:not([data-theme="light"]) .highlight .nd { color: #ffa500 } /* Name.Decorator */ +body:not([data-theme="light"]) .highlight .ni { color: #d0d0d0 } /* Name.Entity */ +body:not([data-theme="light"]) .highlight .ne { color: #bbbbbb } /* Name.Exception */ +body:not([data-theme="light"]) .highlight .nf { color: #71adff } /* Name.Function */ +body:not([data-theme="light"]) .highlight .nl { color: #d0d0d0 } /* Name.Label */ +body:not([data-theme="light"]) .highlight .nn { color: #71adff; text-decoration: underline } /* Name.Namespace */ +body:not([data-theme="light"]) .highlight .nx { color: #d0d0d0 } /* Name.Other */ +body:not([data-theme="light"]) .highlight .py { color: #d0d0d0 } /* Name.Property */ +body:not([data-theme="light"]) .highlight .nt { color: #6ebf26; font-weight: bold } /* Name.Tag */ +body:not([data-theme="light"]) .highlight .nv { color: #40ffff } /* Name.Variable */ +body:not([data-theme="light"]) .highlight .ow { color: #6ebf26; font-weight: bold } /* Operator.Word */ +body:not([data-theme="light"]) .highlight .pm { color: #d0d0d0 } /* Punctuation.Marker */ +body:not([data-theme="light"]) .highlight .w { color: #666666 } /* Text.Whitespace */ +body:not([data-theme="light"]) .highlight .mb { color: #51b2fd } /* Literal.Number.Bin */ +body:not([data-theme="light"]) .highlight .mf { color: #51b2fd } /* Literal.Number.Float */ +body:not([data-theme="light"]) .highlight .mh { color: #51b2fd } /* Literal.Number.Hex */ +body:not([data-theme="light"]) .highlight .mi { color: #51b2fd } /* Literal.Number.Integer */ +body:not([data-theme="light"]) .highlight .mo { color: #51b2fd } /* Literal.Number.Oct */ +body:not([data-theme="light"]) .highlight .sa { color: #ed9d13 } /* Literal.String.Affix */ +body:not([data-theme="light"]) .highlight .sb { color: #ed9d13 } /* Literal.String.Backtick */ +body:not([data-theme="light"]) .highlight .sc { color: #ed9d13 } /* Literal.String.Char */ +body:not([data-theme="light"]) .highlight .dl { color: #ed9d13 } /* Literal.String.Delimiter */ +body:not([data-theme="light"]) .highlight .sd { color: #ed9d13 } /* Literal.String.Doc */ +body:not([data-theme="light"]) .highlight .s2 { color: #ed9d13 } /* Literal.String.Double */ +body:not([data-theme="light"]) .highlight .se { color: #ed9d13 } /* Literal.String.Escape */ +body:not([data-theme="light"]) .highlight .sh { color: #ed9d13 } /* Literal.String.Heredoc */ +body:not([data-theme="light"]) .highlight .si { color: #ed9d13 } /* Literal.String.Interpol */ +body:not([data-theme="light"]) .highlight .sx { color: #ffa500 } /* Literal.String.Other */ +body:not([data-theme="light"]) .highlight .sr { color: #ed9d13 } /* Literal.String.Regex */ +body:not([data-theme="light"]) .highlight .s1 { color: #ed9d13 } /* Literal.String.Single */ +body:not([data-theme="light"]) .highlight .ss { color: #ed9d13 } /* Literal.String.Symbol */ +body:not([data-theme="light"]) .highlight .bp { color: #2fbccd } /* Name.Builtin.Pseudo */ +body:not([data-theme="light"]) .highlight .fm { color: #71adff } /* Name.Function.Magic */ +body:not([data-theme="light"]) .highlight .vc { color: #40ffff } /* Name.Variable.Class */ +body:not([data-theme="light"]) .highlight .vg { color: #40ffff } /* Name.Variable.Global */ +body:not([data-theme="light"]) .highlight .vi { color: #40ffff } /* Name.Variable.Instance */ +body:not([data-theme="light"]) .highlight .vm { color: #40ffff } /* Name.Variable.Magic */ +body:not([data-theme="light"]) .highlight .il { color: #51b2fd } /* Literal.Number.Integer.Long */ +} +} \ No newline at end of file 
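Note on the stylesheet above: these Pygments rules target the short token classes that Pygments writes into the rendered HTML. A hand-written sketch of that markup (illustrative only, not actual build output) shows how the selectors apply, e.g. .highlight .k styles the def keyword, .highlight .nf the function name, and .highlight .o the operator:

<div class="highlight"><pre><span class="k">def</span> <span class="nf">add</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">):</span>
    <span class="k">return</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span></pre></div>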
diff --git a/_static/scripts/furo-extensions.js b/_static/scripts/furo-extensions.js new file mode 100644 index 00000000..e69de29b diff --git a/_static/scripts/furo.js b/_static/scripts/furo.js new file mode 100644 index 00000000..32e7c05b --- /dev/null +++ b/_static/scripts/furo.js @@ -0,0 +1,3 @@ +/*! For license information please see furo.js.LICENSE.txt */ +(()=>{var t={212:function(t,e,n){var o,r;r=void 0!==n.g?n.g:"undefined"!=typeof window?window:this,o=function(){return function(t){"use strict";var e={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},n=function(t,e,n){if(n.settings.events){var o=new CustomEvent(t,{bubbles:!0,cancelable:!0,detail:n});e.dispatchEvent(o)}},o=function(t){var e=0;if(t.offsetParent)for(;t;)e+=t.offsetTop,t=t.offsetParent;return e>=0?e:0},r=function(t){t&&t.sort((function(t,e){return o(t.content)<o(e.content)?-1:1}))},c=function(t,e,n){var o=t.getBoundingClientRect(),r="function"==typeof e.offset?parseFloat(e.offset()):parseFloat(e.offset);return n?parseInt(o.bottom,10)<(window.innerHeight||document.documentElement.clientHeight):parseInt(o.top,10)<=r},s=function(){return Math.ceil(window.innerHeight+window.pageYOffset)>=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)},l=function(t,e){var n=t[t.length-1];if(function(t,e){return!(!s()||!c(t.content,e,!0))}(n,e))return n;for(var o=t.length-1;o>=0;o--)if(c(t[o].content,e))return t[o]},a=function(t,e){if(e.nested&&t.parentNode){var n=t.parentNode.closest("li");n&&(n.classList.remove(e.nestedClass),a(n,e))}},i=function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.remove(e.navClass),t.content.classList.remove(e.contentClass),a(o,e),n("gumshoeDeactivate",o,{link:t.nav,content:t.content,settings:e}))}},u=function(t,e){if(e.nested){var n=t.parentNode.closest("li");n&&(n.classList.add(e.nestedClass),u(n,e))}};return function(o,c){var s,a,d,f,m,v={setup:function(){s=document.querySelectorAll(o),a=[],Array.prototype.forEach.call(s,(function(t){var e=document.getElementById(decodeURIComponent(t.hash.substr(1)));e&&a.push({nav:t,content:e})})),r(a)},detect:function(){var t=l(a,m);t?d&&t.content===d.content||(i(d,m),function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.add(e.navClass),t.content.classList.add(e.contentClass),u(o,e),n("gumshoeActivate",o,{link:t.nav,content:t.content,settings:e}))}}(t,m),d=t):d&&(i(d,m),d=null)}},h=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(v.detect)},g=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame((function(){r(a),v.detect()}))};return v.destroy=function(){d&&i(d,m),t.removeEventListener("scroll",h,!1),m.reflow&&t.removeEventListener("resize",g,!1),a=null,s=null,d=null,f=null,m=null},m=function(){var t={};return Array.prototype.forEach.call(arguments,(function(e){for(var n in e){if(!e.hasOwnProperty(n))return;t[n]=e[n]}})),t}(e,c||{}),v.setup(),v.detect(),t.addEventListener("scroll",h,!1),m.reflow&&t.addEventListener("resize",g,!1),v}}(r)}.apply(e,[]),void 0===o||(t.exports=o)}},e={};function n(o){var r=e[o];if(void 0!==r)return r.exports;var c=e[o]={exports:{}};return t[o].call(c.exports,c,c.exports,n),c.exports}n.n=t=>{var e=t&&t.__esModule?()=>t.default:()=>t;return n.d(e,{a:e}),e},n.d=(t,e)=>{for(var o in e)n.o(e,o)&&!n.o(t,o)&&Object.defineProperty(t,o,{enumerable:!0,get:e[o]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),n.o=(t,e)=>Object.prototype.hasOwnProperty.call(t,e),(()=>{"use strict";var t=n(212),e=n.n(t),o=null,r=null,c=window.pageYOffset||document.documentElement.scrollTop;const s=64;function l(){const 
t=localStorage.getItem("theme")||"auto";var e;"light"!==(e=window.matchMedia("(prefers-color-scheme: dark)").matches?"auto"===t?"light":"light"==t?"dark":"auto":"auto"===t?"dark":"dark"==t?"light":"auto")&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. Resetting to auto.`),e="auto"),document.body.dataset.theme=e,localStorage.setItem("theme",e),console.log(`Changed to ${e} mode.`)}function a(){!function(){const t=document.getElementsByClassName("theme-toggle");Array.from(t).forEach((t=>{t.addEventListener("click",l)}))}(),function(){let t=0,e=!1;window.addEventListener("scroll",(function(n){t=window.scrollY,e||(window.requestAnimationFrame((function(){var n;n=t,0==Math.floor(r.getBoundingClientRect().top)?r.classList.add("scrolled"):r.classList.remove("scrolled"),function(t){t<s?document.documentElement.classList.remove("show-back-to-top"):t<c?document.documentElement.classList.add("show-back-to-top"):t>c&&document.documentElement.classList.remove("show-back-to-top"),c=t}(n),function(t){null!==o&&(0==t?o.scrollTo(0,0):Math.ceil(t)>=Math.floor(document.documentElement.scrollHeight-window.innerHeight)?o.scrollTo(0,o.scrollHeight):document.querySelector(".scroll-current"))}(n),e=!1})),e=!0)})),window.scroll()}(),null!==o&&new(e())(".toc-tree a",{reflow:!0,recursive:!0,navClass:"scroll-current",offset:()=>{let t=parseFloat(getComputedStyle(document.documentElement).fontSize);return r.getBoundingClientRect().height+.5*t+1}})}document.addEventListener("DOMContentLoaded",(function(){document.body.parentNode.classList.remove("no-js"),r=document.querySelector("header"),o=document.querySelector(".toc-scroll"),a()}))})()})(); +//# sourceMappingURL=furo.js.map \ No newline at end of file diff --git a/_static/scripts/furo.js.LICENSE.txt b/_static/scripts/furo.js.LICENSE.txt new file mode 100644 index 00000000..1632189c --- /dev/null +++ b/_static/scripts/furo.js.LICENSE.txt @@ -0,0 +1,7 @@ +/*! + * gumshoejs v5.1.2 (patched by @pradyunsg) + * A simple, framework-agnostic scrollspy script. 
+ * (c) 2019 Chris Ferdinandi + * MIT License + * http://github.com/cferdinandi/gumshoe + */ diff --git a/_static/scripts/furo.js.map b/_static/scripts/furo.js.map new file mode 100644 index 00000000..7b7ddb11 --- /dev/null +++ b/_static/scripts/furo.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/furo.js","mappings":";iCAAA,MAQWA,SAWS,IAAX,EAAAC,EACH,EAAAA,EACkB,oBAAXC,OACPA,OACAC,KAbS,EAAF,WACP,OAaJ,SAAUD,GACR,aAMA,IAAIE,EAAW,CAEbC,SAAU,SACVC,aAAc,SAGdC,QAAQ,EACRC,YAAa,SAGbC,OAAQ,EACRC,QAAQ,EAGRC,QAAQ,GA6BNC,EAAY,SAAUC,EAAMC,EAAMC,GAEpC,GAAKA,EAAOC,SAASL,OAArB,CAGA,IAAIM,EAAQ,IAAIC,YAAYL,EAAM,CAChCM,SAAS,EACTC,YAAY,EACZL,OAAQA,IAIVD,EAAKO,cAAcJ,EAVgB,CAWrC,EAOIK,EAAe,SAAUR,GAC3B,IAAIS,EAAW,EACf,GAAIT,EAAKU,aACP,KAAOV,GACLS,GAAYT,EAAKW,UACjBX,EAAOA,EAAKU,aAGhB,OAAOD,GAAY,EAAIA,EAAW,CACpC,EAMIG,EAAe,SAAUC,GACvBA,GACFA,EAASC,MAAK,SAAUC,EAAOC,GAG7B,OAFcR,EAAaO,EAAME,SACnBT,EAAaQ,EAAMC,UACF,EACxB,CACT,GAEJ,EAwCIC,EAAW,SAAUlB,EAAME,EAAUiB,GACvC,IAAIC,EAASpB,EAAKqB,wBACd1B,EAnCU,SAAUO,GAExB,MAA+B,mBAApBA,EAASP,OACX2B,WAAWpB,EAASP,UAItB2B,WAAWpB,EAASP,OAC7B,CA2Be4B,CAAUrB,GACvB,OAAIiB,EAEAK,SAASJ,EAAOD,OAAQ,KACvB/B,EAAOqC,aAAeC,SAASC,gBAAgBC,cAG7CJ,SAASJ,EAAOS,IAAK,KAAOlC,CACrC,EAMImC,EAAa,WACf,OACEC,KAAKC,KAAK5C,EAAOqC,YAAcrC,EAAO6C,cAnCjCF,KAAKG,IACVR,SAASS,KAAKC,aACdV,SAASC,gBAAgBS,aACzBV,SAASS,KAAKE,aACdX,SAASC,gBAAgBU,aACzBX,SAASS,KAAKP,aACdF,SAASC,gBAAgBC,aAkC7B,EAmBIU,EAAY,SAAUzB,EAAUX,GAClC,IAAIqC,EAAO1B,EAASA,EAAS2B,OAAS,GACtC,GAbgB,SAAUC,EAAMvC,GAChC,SAAI4B,MAAgBZ,EAASuB,EAAKxB,QAASf,GAAU,GAEvD,CAUMwC,CAAYH,EAAMrC,GAAW,OAAOqC,EACxC,IAAK,IAAII,EAAI9B,EAAS2B,OAAS,EAAGG,GAAK,EAAGA,IACxC,GAAIzB,EAASL,EAAS8B,GAAG1B,QAASf,GAAW,OAAOW,EAAS8B,EAEjE,EAOIC,EAAmB,SAAUC,EAAK3C,GAEpC,GAAKA,EAAST,QAAWoD,EAAIC,WAA7B,CAGA,IAAIC,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASR,aAG7BkD,EAAiBG,EAAI7C,GAV0B,CAWjD,EAOIiD,EAAa,SAAUC,EAAOlD,GAEhC,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASX,UAC7B6D,EAAMnC,QAAQgC,UAAUC,OAAOhD,EAASV,cAGxCoD,EAAiBG,EAAI7C,GAGrBJ,EAAU,oBAAqBiD,EAAI,CACjCM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,EAOIoD,EAAiB,SAAUT,EAAK3C,GAElC,GAAKA,EAAST,OAAd,CAGA,IAAIsD,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASR,aAG1B4D,EAAeP,EAAI7C,GAVS,CAW9B,EA6LA,OA1JkB,SAAUsD,EAAUC,GAKpC,IACIC,EAAU7C,EAAU8C,EAASC,EAAS1D,EADtC2D,EAAa,CAUjBA,MAAmB,WAEjBH,EAAWhC,SAASoC,iBAAiBN,GAGrC3C,EAAW,GAGXkD,MAAMC,UAAUC,QAAQC,KAAKR,GAAU,SAAUjB,GAE/C,IAAIxB,EAAUS,SAASyC,eACrBC,mBAAmB3B,EAAK4B,KAAKC,OAAO,KAEjCrD,GAGLJ,EAAS0D,KAAK,CACZ1B,IAAKJ,EACLxB,QAASA,GAEb,IAGAL,EAAaC,EACf,EAKAgD,OAAoB,WAElB,IAAIW,EAASlC,EAAUzB,EAAUX,GAG5BsE,EASDb,GAAWa,EAAOvD,UAAY0C,EAAQ1C,UAG1CkC,EAAWQ,EAASzD,GAzFT,SAAUkD,EAAOlD,GAE9B,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASX,UAC1B6D,EAAMnC,QAAQgC,UAAUM,IAAIrD,EAASV,cAGrC8D,EAAeP,EAAI7C,GAGnBJ,EAAU,kBAAmBiD,EAAI,CAC/BM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,CAqEIuE,CAASD,EAAQtE,GAGjByD,EAAUa,GAfJb,IACFR,EAAWQ,EAASzD,GACpByD,EAAU,KAchB,GAMIe,EAAgB,SAAUvE,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsBf,EAAWgB,OACpD,EAMIC,EAAgB,SAAU3E,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,uBAAsB,WACrChE,EAAaC,GACbgD,EAAWgB,QACb,GACF,EAkDA,OA7CAhB,EAAWkB,QAAU,WAEfpB,GACFR,EAAWQ,EAASzD,GAItBd,EAAO4F,oBAAoB,SAAUN,GAAe,GAChDxE,EAASN,QACXR,EAAO4F,oBAAoB,SAAUF,GAAe,GAItDjE,EAAW,KACX6C,EAAW,KACXC,EAAU,KACVC,EAAU,KACV1D,EAAW,IACb,EAOEA,EA3XS,WACX,IAAI+E,EAAS,CAAC,EAOd,OANAlB,MAAMC,UAAUC,QAAQC,KAAKgB,WAAW,SAAUC,GAChD,IAAK,IAAIC,KAAOD,EAAK,CACnB,IAAKA,EAAIE,eAA
eD,GAAM,OAC9BH,EAAOG,GAAOD,EAAIC,EACpB,CACF,IACOH,CACT,CAkXeK,CAAOhG,EAAUmE,GAAW,CAAC,GAGxCI,EAAW0B,QAGX1B,EAAWgB,SAGXzF,EAAOoG,iBAAiB,SAAUd,GAAe,GAC7CxE,EAASN,QACXR,EAAOoG,iBAAiB,SAAUV,GAAe,GAS9CjB,CACT,CAOF,CArcW4B,CAAQvG,EAChB,UAFM,SAEN,uBCXDwG,EAA2B,CAAC,EAGhC,SAASC,EAAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,CAAC,GAOX,OAHAE,EAAoBL,GAAU1B,KAAK8B,EAAOD,QAASC,EAAQA,EAAOD,QAASJ,GAGpEK,EAAOD,OACf,CCrBAJ,EAAoBO,EAAKF,IACxB,IAAIG,EAASH,GAAUA,EAAOI,WAC7B,IAAOJ,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBU,EAAEF,EAAQ,CAAEG,EAAGH,IAC5BA,CAAM,ECLdR,EAAoBU,EAAI,CAACN,EAASQ,KACjC,IAAI,IAAInB,KAAOmB,EACXZ,EAAoBa,EAAED,EAAYnB,KAASO,EAAoBa,EAAET,EAASX,IAC5EqB,OAAOC,eAAeX,EAASX,EAAK,CAAEuB,YAAY,EAAMC,IAAKL,EAAWnB,IAE1E,ECNDO,EAAoBxG,EAAI,WACvB,GAA0B,iBAAf0H,WAAyB,OAAOA,WAC3C,IACC,OAAOxH,MAAQ,IAAIyH,SAAS,cAAb,EAChB,CAAE,MAAOC,GACR,GAAsB,iBAAX3H,OAAqB,OAAOA,MACxC,CACA,CAPuB,GCAxBuG,EAAoBa,EAAI,CAACrB,EAAK6B,IAAUP,OAAOzC,UAAUqB,eAAenB,KAAKiB,EAAK6B,4CCK9EC,EAAY,KACZC,EAAS,KACTC,EAAgB/H,OAAO6C,aAAeP,SAASC,gBAAgByF,UACnE,MAAMC,EAAmB,GA2EzB,SAASC,IACP,MAAMC,EAAeC,aAAaC,QAAQ,UAAY,OAZxD,IAAkBC,EACH,WADGA,EAaItI,OAAOuI,WAAW,gCAAgCC,QAI/C,SAAjBL,EACO,QACgB,SAAhBA,EACA,OAEA,OAIU,SAAjBA,EACO,OACgB,QAAhBA,EACA,QAEA,SA9BoB,SAATG,GAA4B,SAATA,IACzCG,QAAQC,MAAM,2BAA2BJ,yBACzCA,EAAO,QAGThG,SAASS,KAAK4F,QAAQC,MAAQN,EAC9BF,aAAaS,QAAQ,QAASP,GAC9BG,QAAQK,IAAI,cAAcR,UA0B5B,CAkDA,SAASnC,KART,WAEE,MAAM4C,EAAUzG,SAAS0G,uBAAuB,gBAChDrE,MAAMsE,KAAKF,GAASlE,SAASqE,IAC3BA,EAAI9C,iBAAiB,QAAS8B,EAAe,GAEjD,CAGEiB,GA9CF,WAEE,IAAIC,EAA6B,EAC7BC,GAAU,EAEdrJ,OAAOoG,iBAAiB,UAAU,SAAUuB,GAC1CyB,EAA6BpJ,OAAOsJ,QAE/BD,IACHrJ,OAAOwF,uBAAsB,WAzDnC,IAAuB+D,IA0DDH,EA9GkC,GAAlDzG,KAAK6G,MAAM1B,EAAO7F,wBAAwBQ,KAC5CqF,EAAOjE,UAAUM,IAAI,YAErB2D,EAAOjE,UAAUC,OAAO,YAI5B,SAAmCyF,GAC7BA,EAAYtB,EACd3F,SAASC,gBAAgBsB,UAAUC,OAAO,oBAEtCyF,EAAYxB,EACdzF,SAASC,gBAAgBsB,UAAUM,IAAI,oBAC9BoF,EAAYxB,GACrBzF,SAASC,gBAAgBsB,UAAUC,OAAO,oBAG9CiE,EAAgBwB,CAClB,CAoCEE,CAA0BF,GAlC5B,SAA6BA,GACT,OAAd1B,IAKa,GAAb0B,EACF1B,EAAU6B,SAAS,EAAG,GAGtB/G,KAAKC,KAAK2G,IACV5G,KAAK6G,MAAMlH,SAASC,gBAAgBS,aAAehD,OAAOqC,aAE1DwF,EAAU6B,SAAS,EAAG7B,EAAU7E,cAGhBV,SAASqH,cAAc,mBAc3C,CAKEC,CAAoBL,GAwDdF,GAAU,CACZ,IAEAA,GAAU,EAEd,IACArJ,OAAO6J,QACT,CA6BEC,GA1BkB,OAAdjC,GAKJ,IAAI,IAAJ,CAAY,cAAe,CACzBrH,QAAQ,EACRuJ,WAAW,EACX5J,SAAU,iBACVI,OAAQ,KACN,IAAIyJ,EAAM9H,WAAW+H,iBAAiB3H,SAASC,iBAAiB2H,UAChE,OAAOpC,EAAO7F,wBAAwBkI,OAAS,GAAMH,EAAM,CAAC,GAiBlE,CAcA1H,SAAS8D,iBAAiB,oBAT1B,WACE9D,SAASS,KAAKW,WAAWG,UAAUC,OAAO,SAE1CgE,EAASxF,SAASqH,cAAc,UAChC9B,EAAYvF,SAASqH,cAAc,eAEnCxD,GACF","sources":["webpack:///./src/furo/assets/scripts/gumshoe-patched.js","webpack:///webpack/bootstrap","webpack:///webpack/runtime/compat get default export","webpack:///webpack/runtime/define property getters","webpack:///webpack/runtime/global","webpack:///webpack/runtime/hasOwnProperty shorthand","webpack:///./src/furo/assets/scripts/furo.js"],"sourcesContent":["/*!\n * gumshoejs v5.1.2 (patched by @pradyunsg)\n * A simple, framework-agnostic scrollspy script.\n * (c) 2019 Chris Ferdinandi\n * MIT License\n * http://github.com/cferdinandi/gumshoe\n */\n\n(function (root, factory) {\n if (typeof define === \"function\" && define.amd) {\n define([], function () {\n return factory(root);\n });\n } else if (typeof exports === \"object\") {\n module.exports = factory(root);\n } else {\n root.Gumshoe = factory(root);\n }\n})(\n typeof global !== \"undefined\"\n ? global\n : typeof window !== \"undefined\"\n ? 
window\n : this,\n function (window) {\n \"use strict\";\n\n //\n // Defaults\n //\n\n var defaults = {\n // Active classes\n navClass: \"active\",\n contentClass: \"active\",\n\n // Nested navigation\n nested: false,\n nestedClass: \"active\",\n\n // Offset & reflow\n offset: 0,\n reflow: false,\n\n // Event support\n events: true,\n };\n\n //\n // Methods\n //\n\n /**\n * Merge two or more objects together.\n * @param {Object} objects The objects to merge together\n * @returns {Object} Merged values of defaults and options\n */\n var extend = function () {\n var merged = {};\n Array.prototype.forEach.call(arguments, function (obj) {\n for (var key in obj) {\n if (!obj.hasOwnProperty(key)) return;\n merged[key] = obj[key];\n }\n });\n return merged;\n };\n\n /**\n * Emit a custom event\n * @param {String} type The event type\n * @param {Node} elem The element to attach the event to\n * @param {Object} detail Any details to pass along with the event\n */\n var emitEvent = function (type, elem, detail) {\n // Make sure events are enabled\n if (!detail.settings.events) return;\n\n // Create a new event\n var event = new CustomEvent(type, {\n bubbles: true,\n cancelable: true,\n detail: detail,\n });\n\n // Dispatch the event\n elem.dispatchEvent(event);\n };\n\n /**\n * Get an element's distance from the top of the Document.\n * @param {Node} elem The element\n * @return {Number} Distance from the top in pixels\n */\n var getOffsetTop = function (elem) {\n var location = 0;\n if (elem.offsetParent) {\n while (elem) {\n location += elem.offsetTop;\n elem = elem.offsetParent;\n }\n }\n return location >= 0 ? location : 0;\n };\n\n /**\n * Sort content from first to last in the DOM\n * @param {Array} contents The content areas\n */\n var sortContents = function (contents) {\n if (contents) {\n contents.sort(function (item1, item2) {\n var offset1 = getOffsetTop(item1.content);\n var offset2 = getOffsetTop(item2.content);\n if (offset1 < offset2) return -1;\n return 1;\n });\n }\n };\n\n /**\n * Get the offset to use for calculating position\n * @param {Object} settings The settings for this instantiation\n * @return {Float} The number of pixels to offset the calculations\n */\n var getOffset = function (settings) {\n // if the offset is a function run it\n if (typeof settings.offset === \"function\") {\n return parseFloat(settings.offset());\n }\n\n // Otherwise, return it as-is\n return parseFloat(settings.offset);\n };\n\n /**\n * Get the document element's height\n * @private\n * @returns {Number}\n */\n var getDocumentHeight = function () {\n return Math.max(\n document.body.scrollHeight,\n document.documentElement.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.offsetHeight,\n document.body.clientHeight,\n document.documentElement.clientHeight,\n );\n };\n\n /**\n * Determine if an element is in view\n * @param {Node} elem The element\n * @param {Object} settings The settings for this instantiation\n * @param {Boolean} bottom If true, check if element is above bottom of viewport instead\n * @return {Boolean} Returns true if element is in the viewport\n */\n var isInView = function (elem, settings, bottom) {\n var bounds = elem.getBoundingClientRect();\n var offset = getOffset(settings);\n if (bottom) {\n return (\n parseInt(bounds.bottom, 10) <\n (window.innerHeight || document.documentElement.clientHeight)\n );\n }\n return parseInt(bounds.top, 10) <= offset;\n };\n\n /**\n * Check if at the bottom of the viewport\n * @return {Boolean} If true, page is at the bottom 
of the viewport\n */\n var isAtBottom = function () {\n if (\n Math.ceil(window.innerHeight + window.pageYOffset) >=\n getDocumentHeight()\n )\n return true;\n return false;\n };\n\n /**\n * Check if the last item should be used (even if not at the top of the page)\n * @param {Object} item The last item\n * @param {Object} settings The settings for this instantiation\n * @return {Boolean} If true, use the last item\n */\n var useLastItem = function (item, settings) {\n if (isAtBottom() && isInView(item.content, settings, true)) return true;\n return false;\n };\n\n /**\n * Get the active content\n * @param {Array} contents The content areas\n * @param {Object} settings The settings for this instantiation\n * @return {Object} The content area and matching navigation link\n */\n var getActive = function (contents, settings) {\n var last = contents[contents.length - 1];\n if (useLastItem(last, settings)) return last;\n for (var i = contents.length - 1; i >= 0; i--) {\n if (isInView(contents[i].content, settings)) return contents[i];\n }\n };\n\n /**\n * Deactivate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var deactivateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested || !nav.parentNode) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Remove the active class\n li.classList.remove(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n deactivateNested(li, settings);\n };\n\n /**\n * Deactivate a nav and content area\n * @param {Object} items The nav item and content to deactivate\n * @param {Object} settings The settings for this instantiation\n */\n var deactivate = function (items, settings) {\n // Make sure there are items to deactivate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Remove the active class from the nav and content\n li.classList.remove(settings.navClass);\n items.content.classList.remove(settings.contentClass);\n\n // Deactivate any parent navs in a nested navigation\n deactivateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeDeactivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Activate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var activateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Add the active class\n li.classList.add(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n activateNested(li, settings);\n };\n\n /**\n * Activate a nav and content area\n * @param {Object} items The nav item and content to activate\n * @param {Object} settings The settings for this instantiation\n */\n var activate = function (items, settings) {\n // Make sure there are items to activate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Add the active class to the nav and content\n li.classList.add(settings.navClass);\n items.content.classList.add(settings.contentClass);\n\n // Activate any parent navs in a nested 
navigation\n activateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeActivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Create the Constructor object\n * @param {String} selector The selector to use for navigation items\n * @param {Object} options User options and settings\n */\n var Constructor = function (selector, options) {\n //\n // Variables\n //\n\n var publicAPIs = {};\n var navItems, contents, current, timeout, settings;\n\n //\n // Methods\n //\n\n /**\n * Set variables from DOM elements\n */\n publicAPIs.setup = function () {\n // Get all nav items\n navItems = document.querySelectorAll(selector);\n\n // Create contents array\n contents = [];\n\n // Loop through each item, get it's matching content, and push to the array\n Array.prototype.forEach.call(navItems, function (item) {\n // Get the content for the nav item\n var content = document.getElementById(\n decodeURIComponent(item.hash.substr(1)),\n );\n if (!content) return;\n\n // Push to the contents array\n contents.push({\n nav: item,\n content: content,\n });\n });\n\n // Sort contents by the order they appear in the DOM\n sortContents(contents);\n };\n\n /**\n * Detect which content is currently active\n */\n publicAPIs.detect = function () {\n // Get the active content\n var active = getActive(contents, settings);\n\n // if there's no active content, deactivate and bail\n if (!active) {\n if (current) {\n deactivate(current, settings);\n current = null;\n }\n return;\n }\n\n // If the active content is the one currently active, do nothing\n if (current && active.content === current.content) return;\n\n // Deactivate the current content and activate the new content\n deactivate(current, settings);\n activate(active, settings);\n\n // Update the currently active content\n current = active;\n };\n\n /**\n * Detect the active content on scroll\n * Debounced for performance\n */\n var scrollHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(publicAPIs.detect);\n };\n\n /**\n * Update content sorting on resize\n * Debounced for performance\n */\n var resizeHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(function () {\n sortContents(contents);\n publicAPIs.detect();\n });\n };\n\n /**\n * Destroy the current instantiation\n */\n publicAPIs.destroy = function () {\n // Undo DOM changes\n if (current) {\n deactivate(current, settings);\n }\n\n // Remove event listeners\n window.removeEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.removeEventListener(\"resize\", resizeHandler, false);\n }\n\n // Reset variables\n contents = null;\n navItems = null;\n current = null;\n timeout = null;\n settings = null;\n };\n\n /**\n * Initialize the current instantiation\n */\n var init = function () {\n // Merge user options into defaults\n settings = extend(defaults, options || {});\n\n // Setup variables based on the current DOM\n publicAPIs.setup();\n\n // Find the currently active content\n publicAPIs.detect();\n\n // Setup event listeners\n window.addEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.addEventListener(\"resize\", resizeHandler, false);\n }\n };\n\n //\n // Initialize and return the public 
APIs\n //\n\n init();\n return publicAPIs;\n };\n\n //\n // Return the Constructor\n //\n\n return Constructor;\n },\n);\n","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","import Gumshoe from \"./gumshoe-patched.js\";\n\n////////////////////////////////////////////////////////////////////////////////\n// Scroll Handling\n////////////////////////////////////////////////////////////////////////////////\nvar tocScroll = null;\nvar header = null;\nvar lastScrollTop = window.pageYOffset || document.documentElement.scrollTop;\nconst GO_TO_TOP_OFFSET = 64;\n\nfunction scrollHandlerForHeader() {\n if (Math.floor(header.getBoundingClientRect().top) == 0) {\n header.classList.add(\"scrolled\");\n } else {\n header.classList.remove(\"scrolled\");\n }\n}\n\nfunction scrollHandlerForBackToTop(positionY) {\n if (positionY < GO_TO_TOP_OFFSET) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n } else {\n if (positionY < lastScrollTop) {\n document.documentElement.classList.add(\"show-back-to-top\");\n } else if (positionY > lastScrollTop) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n }\n }\n lastScrollTop = positionY;\n}\n\nfunction scrollHandlerForTOC(positionY) {\n if (tocScroll === null) {\n return;\n }\n\n // top of page.\n if (positionY == 0) {\n tocScroll.scrollTo(0, 0);\n } else if (\n // bottom of page.\n Math.ceil(positionY) >=\n Math.floor(document.documentElement.scrollHeight - window.innerHeight)\n ) {\n tocScroll.scrollTo(0, tocScroll.scrollHeight);\n } else {\n // somewhere in the middle.\n const current = document.querySelector(\".scroll-current\");\n if (current == null) {\n return;\n }\n\n // https://github.com/pypa/pip/issues/9159 This breaks scroll behaviours.\n // // scroll the currently \"active\" heading in toc, into view.\n // const rect = current.getBoundingClientRect();\n // if (0 > rect.top) {\n // current.scrollIntoView(true); // the argument is \"alignTop\"\n // } else if 
(rect.bottom > window.innerHeight) {\n // current.scrollIntoView(false);\n // }\n }\n}\n\nfunction scrollHandler(positionY) {\n scrollHandlerForHeader();\n scrollHandlerForBackToTop(positionY);\n scrollHandlerForTOC(positionY);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Theme Toggle\n////////////////////////////////////////////////////////////////////////////////\nfunction setTheme(mode) {\n if (mode !== \"light\" && mode !== \"dark\" && mode !== \"auto\") {\n console.error(`Got invalid theme mode: ${mode}. Resetting to auto.`);\n mode = \"auto\";\n }\n\n document.body.dataset.theme = mode;\n localStorage.setItem(\"theme\", mode);\n console.log(`Changed to ${mode} mode.`);\n}\n\nfunction cycleThemeOnce() {\n const currentTheme = localStorage.getItem(\"theme\") || \"auto\";\n const prefersDark = window.matchMedia(\"(prefers-color-scheme: dark)\").matches;\n\n if (prefersDark) {\n // Auto (dark) -> Light -> Dark\n if (currentTheme === \"auto\") {\n setTheme(\"light\");\n } else if (currentTheme == \"light\") {\n setTheme(\"dark\");\n } else {\n setTheme(\"auto\");\n }\n } else {\n // Auto (light) -> Dark -> Light\n if (currentTheme === \"auto\") {\n setTheme(\"dark\");\n } else if (currentTheme == \"dark\") {\n setTheme(\"light\");\n } else {\n setTheme(\"auto\");\n }\n }\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////////////////////////\nfunction setupScrollHandler() {\n // Taken from https://developer.mozilla.org/en-US/docs/Web/API/Document/scroll_event\n let last_known_scroll_position = 0;\n let ticking = false;\n\n window.addEventListener(\"scroll\", function (e) {\n last_known_scroll_position = window.scrollY;\n\n if (!ticking) {\n window.requestAnimationFrame(function () {\n scrollHandler(last_known_scroll_position);\n ticking = false;\n });\n\n ticking = true;\n }\n });\n window.scroll();\n}\n\nfunction setupScrollSpy() {\n if (tocScroll === null) {\n return;\n }\n\n // Scrollspy -- highlight table on contents, based on scroll\n new Gumshoe(\".toc-tree a\", {\n reflow: true,\n recursive: true,\n navClass: \"scroll-current\",\n offset: () => {\n let rem = parseFloat(getComputedStyle(document.documentElement).fontSize);\n return header.getBoundingClientRect().height + 0.5 * rem + 1;\n },\n });\n}\n\nfunction setupTheme() {\n // Attach event handlers for toggling themes\n const buttons = document.getElementsByClassName(\"theme-toggle\");\n Array.from(buttons).forEach((btn) => {\n btn.addEventListener(\"click\", cycleThemeOnce);\n });\n}\n\nfunction setup() {\n setupTheme();\n setupScrollHandler();\n setupScrollSpy();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Main entrypoint\n////////////////////////////////////////////////////////////////////////////////\nfunction main() {\n document.body.parentNode.classList.remove(\"no-js\");\n\n header = document.querySelector(\"header\");\n tocScroll = document.querySelector(\".toc-scroll\");\n\n setup();\n}\n\ndocument.addEventListener(\"DOMContentLoaded\", 
main);\n"],"names":["root","g","window","this","defaults","navClass","contentClass","nested","nestedClass","offset","reflow","events","emitEvent","type","elem","detail","settings","event","CustomEvent","bubbles","cancelable","dispatchEvent","getOffsetTop","location","offsetParent","offsetTop","sortContents","contents","sort","item1","item2","content","isInView","bottom","bounds","getBoundingClientRect","parseFloat","getOffset","parseInt","innerHeight","document","documentElement","clientHeight","top","isAtBottom","Math","ceil","pageYOffset","max","body","scrollHeight","offsetHeight","getActive","last","length","item","useLastItem","i","deactivateNested","nav","parentNode","li","closest","classList","remove","deactivate","items","link","activateNested","add","selector","options","navItems","current","timeout","publicAPIs","querySelectorAll","Array","prototype","forEach","call","getElementById","decodeURIComponent","hash","substr","push","active","activate","scrollHandler","cancelAnimationFrame","requestAnimationFrame","detect","resizeHandler","destroy","removeEventListener","merged","arguments","obj","key","hasOwnProperty","extend","setup","addEventListener","factory","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","n","getter","__esModule","d","a","definition","o","Object","defineProperty","enumerable","get","globalThis","Function","e","prop","tocScroll","header","lastScrollTop","scrollTop","GO_TO_TOP_OFFSET","cycleThemeOnce","currentTheme","localStorage","getItem","mode","matchMedia","matches","console","error","dataset","theme","setItem","log","buttons","getElementsByClassName","from","btn","setupTheme","last_known_scroll_position","ticking","scrollY","positionY","floor","scrollHandlerForBackToTop","scrollTo","querySelector","scrollHandlerForTOC","scroll","setupScrollHandler","recursive","rem","getComputedStyle","fontSize","height"],"sourceRoot":""} \ No newline at end of file diff --git a/_static/searchtools.js b/_static/searchtools.js new file mode 100644 index 00000000..7918c3fa --- /dev/null +++ b/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
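+ // (worked example, for orientation: an exact full-name match on a priority-0 object scores objNameMatch + objPrio[0] = 11 + 15 = 26)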
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. 
 + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== null) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Please check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
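+ // otherwise stash the query; setIndex() replays the deferred query once the index script requested via loadIndex() arrives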
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
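+        /* ascending by score: _displayNextItem() pops results from the end,
+           so the highest-scoring hit is rendered first */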
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && 
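+          /* add partial matches only when the stemmed word has no exact entry of its own */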
!terms[word])
+            arr.push({ files: terms[term], score: Scorer.partialTerm });
+        });
+        Object.keys(titleTerms).forEach((term) => {
+          if (term.match(escapedWord) && !titleTerms[word])
+            // push the matched title term's files, not the (absent) word's
+            arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+        });
+      }
+
+      // no match but word was a required one
+      if (arr.every((record) => record.files === undefined)) return;
+
+      // found search word in contents
+      arr.forEach((record) => {
+        if (record.files === undefined) return;
+
+        let recordFiles = record.files;
+        if (recordFiles.length === undefined) recordFiles = [recordFiles];
+        files.push(...recordFiles);
+
+        // set score for the word in each file
+        recordFiles.forEach((file) => {
+          if (!scoreMap.has(file)) scoreMap.set(file, {});
+          scoreMap.get(file)[word] = record.score;
+        });
+      });
+
+      // create the mapping, without resetting a file's word list when the
+      // same word reaches it through both an exact and a partial match
+      files.forEach((file) => {
+        if (!fileMap.has(file)) fileMap.set(file, [word]);
+        else if (fileMap.get(file).indexOf(word) === -1)
+          fileMap.get(file).push(word);
+      });
+    });
+
+    // now check if the files don't contain excluded terms
+    const results = [];
+    for (const [file, wordList] of fileMap) {
+      // check if all requirements are matched
+
+      // as search terms with length < 3 are discarded
+      const filteredTermCount = [...searchTerms].filter(
+        (term) => term.length > 2
+      ).length;
+      if (
+        wordList.length !== searchTerms.size &&
+        wordList.length !== filteredTermCount
+      )
+        continue;
+
+      // skip this file (not the whole loop) if it matches an excluded term
+      if (
+        [...excludedTerms].some(
+          (term) =>
+            terms[term] === file ||
+            titleTerms[term] === file ||
+            (terms[term] || []).includes(file) ||
+            (titleTerms[term] || []).includes(file)
+        )
+      )
+        continue;
+
+      // select one (max) score for the file.
+      const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
+      // add result to the result list
+      results.push([
+        docNames[file],
+        titles[file],
+        "",
+        null,
+        score,
+        filenames[file],
+      ]);
+    }
+    return results;
+  },
+
+  /**
+   * helper function to return a node containing the
+   * search summary for a given text. keywords is a list
+   * of stemmed words.
+   */
+  makeSearchSummary: (htmlText, keywords) => {
+    const text = Search.htmlToText(htmlText);
+    if (text === "") return null;
+
+    const textLower = text.toLowerCase();
+    const actualStartPosition = [...keywords]
+      .map((k) => textLower.indexOf(k.toLowerCase()))
+      .filter((i) => i > -1)
+      .slice(-1)[0];
+    const startWithContext = Math.max(actualStartPosition - 120, 0);
+
+    const top = startWithContext === 0 ? "" : "...";
+    const tail = startWithContext + 240 < text.length ? "..." : "";
+
+    let summary = document.createElement("p");
+    summary.classList.add("context");
+    summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
+
+    return summary;
+  },
+};
+
+_ready(Search.init);
diff --git a/_static/skeleton.css b/_static/skeleton.css
new file mode 100644
index 00000000..467c878c
--- /dev/null
+++ b/_static/skeleton.css
@@ -0,0 +1,296 @@
+/* Some sane resets. */
+html {
+  height: 100%;
+}
+
+body {
+  margin: 0;
+  min-height: 100%;
+}
+
+/* All the flexbox magic! 
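+   (body and the .sb-* wrappers below all become flex containers; direction,
+   centering, and growth are then assigned per group.)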
*/ +body, +.sb-announcement, +.sb-content, +.sb-main, +.sb-container, +.sb-container__inner, +.sb-article-container, +.sb-footer-content, +.sb-header, +.sb-header-secondary, +.sb-footer { + display: flex; +} + +/* These order things vertically */ +body, +.sb-main, +.sb-article-container { + flex-direction: column; +} + +/* Put elements in the center */ +.sb-header, +.sb-header-secondary, +.sb-container, +.sb-content, +.sb-footer, +.sb-footer-content { + justify-content: center; +} +/* Put elements at the ends */ +.sb-article-container { + justify-content: space-between; +} + +/* These elements grow. */ +.sb-main, +.sb-content, +.sb-container, +article { + flex-grow: 1; +} + +/* Because padding making this wider is not fun */ +article { + box-sizing: border-box; +} + +/* The announcements element should never be wider than the page. */ +.sb-announcement { + max-width: 100%; +} + +.sb-sidebar-primary, +.sb-sidebar-secondary { + flex-shrink: 0; + width: 17rem; +} + +.sb-announcement__inner { + justify-content: center; + + box-sizing: border-box; + height: 3rem; + + overflow-x: auto; + white-space: nowrap; +} + +/* Sidebars, with checkbox-based toggle */ +.sb-sidebar-primary, +.sb-sidebar-secondary { + position: fixed; + height: 100%; + top: 0; +} + +.sb-sidebar-primary { + left: -17rem; + transition: left 250ms ease-in-out; +} +.sb-sidebar-secondary { + right: -17rem; + transition: right 250ms ease-in-out; +} + +.sb-sidebar-toggle { + display: none; +} +.sb-sidebar-overlay { + position: fixed; + top: 0; + width: 0; + height: 0; + + transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; + + opacity: 0; + background-color: rgba(0, 0, 0, 0.54); +} + +#sb-sidebar-toggle--primary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], +#sb-sidebar-toggle--secondary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { + width: 100%; + height: 100%; + opacity: 1; + transition: width 0ms ease, height 0ms ease, opacity 250ms ease; +} + +#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { + left: 0; +} +#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { + right: 0; +} + +/* Full-width mode */ +.drop-secondary-sidebar-for-full-width-content + .hide-when-secondary-sidebar-shown { + display: none !important; +} +.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { + display: none !important; +} + +/* Mobile views */ +.sb-page-width { + width: 100%; +} + +.sb-article-container, +.sb-footer-content__inner, +.drop-secondary-sidebar-for-full-width-content .sb-article, +.drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 100vw; +} + +.sb-article, +.match-content-width { + padding: 0 1rem; + box-sizing: border-box; +} + +@media (min-width: 32rem) { + .sb-article, + .match-content-width { + padding: 0 2rem; + } +} + +/* Tablet views */ +@media (min-width: 42rem) { + .sb-article-container { + width: auto; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 42rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 46rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 46rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 50rem) { + 
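+  /* each breakpoint from 42rem up widens the footer/full-width track in 4rem steps */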
.sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 50rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Tablet views */ +@media (min-width: 59rem) { + .sb-sidebar-secondary { + position: static; + } + .hide-when-secondary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 63rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 67rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Desktop views */ +@media (min-width: 76rem) { + .sb-sidebar-primary { + position: static; + } + .hide-when-primary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} + +/* Full desktop views */ +@media (min-width: 80rem) { + .sb-article, + .match-content-width { + width: 46rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } +} + +@media (min-width: 84rem) { + .sb-article, + .match-content-width { + width: 50rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } +} + +@media (min-width: 88rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-page-width { + width: 88rem; + } +} diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js new file mode 100644 index 00000000..8a96c69a --- /dev/null +++ b/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
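+ *
+ * Works per text node: the node is split at the first match, the match is
+ * wrapped (a styled span, or an SVG tspan plus a backing rect), and the
+ * remaining fragment is re-scanned recursively for further occurrences.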
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body");
+    window.setTimeout(() => {
+      terms.forEach((term) => _highlightText(body, term, "highlighted"));
+    }, 10);
+
+    const searchBox = document.getElementById("searchbox");
+    if (searchBox === null) return;
+    searchBox.appendChild(
+      document
+        .createRange()
+        .createContextualFragment(
+          '<p class="highlight-link">' +
+            '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
+            _("Hide Search Matches") +
+            "</a></p>"
+        )
+    );
+  },
+
+  /**
+   * helper function to hide the search marks again
+   */
+  hideSearchWords: () => {
+    document
+      .querySelectorAll("#searchbox .highlight-link")
+      .forEach((el) => el.remove());
+    document
+      .querySelectorAll("span.highlighted")
+      .forEach((el) => el.classList.remove("highlighted"));
+    localStorage.removeItem("sphinx_highlight_terms")
+  },
+
+  initEscapeListener: () => {
+    // only install a listener if it is really needed
+    if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
+
+    document.addEventListener("keydown", (event) => {
+      // bail for input elements
+      if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+      // bail with special keys
+      if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
+      if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
+        SphinxHighlight.hideSearchWords();
+        event.preventDefault();
+      }
+    });
+  },
+};
+
+_ready(() => {
+  /* Do not call highlightSearchWords() when we are on the search page.
+   * It will highlight words from the *previous* search query.
+   */
+  if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+  SphinxHighlight.initEscapeListener();
+});
diff --git a/_static/styles/furo-extensions.css b/_static/styles/furo-extensions.css
new file mode 100644
index 00000000..bc447f22
--- /dev/null
+++ b/_static/styles/furo-extensions.css
@@ -0,0 +1,2 @@
+#furo-sidebar-ad-placement{padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)}#furo-sidebar-ad-placement .ethical-sidebar{background:var(--color-background-secondary);border:none;box-shadow:none}#furo-sidebar-ad-placement .ethical-sidebar:hover{background:var(--color-background-hover)}#furo-sidebar-ad-placement .ethical-sidebar a{color:var(--color-foreground-primary)}#furo-sidebar-ad-placement .ethical-callout a{color:var(--color-foreground-secondary)!important}#furo-readthedocs-versions{background:transparent;display:block;position:static;width:100%}#furo-readthedocs-versions .rst-versions{background:#1a1c1e}#furo-readthedocs-versions .rst-current-version{background:var(--color-sidebar-item-background);cursor:unset}#furo-readthedocs-versions .rst-current-version:hover{background:var(--color-sidebar-item-background)}#furo-readthedocs-versions .rst-current-version .fa-book{color:var(--color-foreground-primary)}#furo-readthedocs-versions>.rst-other-versions{padding:0}#furo-readthedocs-versions>.rst-other-versions small{opacity:1}#furo-readthedocs-versions .injected .rst-versions{position:unset}#furo-readthedocs-versions:focus-within,#furo-readthedocs-versions:hover{box-shadow:0 0 0 1px var(--color-sidebar-background-border)}#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:hover .rst-current-version{background:#1a1c1e;font-size:inherit;height:auto;line-height:inherit;padding:12px;text-align:right}#furo-readthedocs-versions:focus-within .rst-current-version .fa-book,#furo-readthedocs-versions:hover .rst-current-version .fa-book{color:#fff;float:left}#furo-readthedocs-versions:focus-within .fa-caret-down,#furo-readthedocs-versions:hover 
.fa-caret-down{display:none}#furo-readthedocs-versions:focus-within .injected,#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:focus-within .rst-other-versions,#furo-readthedocs-versions:hover .injected,#furo-readthedocs-versions:hover .rst-current-version,#furo-readthedocs-versions:hover .rst-other-versions{display:block}#furo-readthedocs-versions:focus-within>.rst-current-version,#furo-readthedocs-versions:hover>.rst-current-version{display:none}.highlight:hover button.copybtn{color:var(--color-code-foreground)}.highlight button.copybtn{align-items:center;background-color:var(--color-code-background);border:none;color:var(--color-background-item);cursor:pointer;height:1.25em;opacity:1;right:.5rem;top:.625rem;transition:color .3s,opacity .3s;width:1.25em}.highlight button.copybtn:hover{background-color:var(--color-code-background);color:var(--color-brand-content)}.highlight button.copybtn:after{background-color:transparent;color:var(--color-code-foreground);display:none}.highlight button.copybtn.success{color:#22863a;transition:color 0ms}.highlight button.copybtn.success:after{display:block}.highlight button.copybtn svg{padding:0}body{--sd-color-primary:var(--color-brand-primary);--sd-color-primary-highlight:var(--color-brand-content);--sd-color-primary-text:var(--color-background-primary);--sd-color-shadow:rgba(0,0,0,.05);--sd-color-card-border:var(--color-card-border);--sd-color-card-border-hover:var(--color-brand-content);--sd-color-card-background:var(--color-card-background);--sd-color-card-text:var(--color-foreground-primary);--sd-color-card-header:var(--color-card-marginals-background);--sd-color-card-footer:var(--color-card-marginals-background);--sd-color-tabs-label-active:var(--color-brand-content);--sd-color-tabs-label-hover:var(--color-foreground-muted);--sd-color-tabs-label-inactive:var(--color-foreground-muted);--sd-color-tabs-underline-active:var(--color-brand-content);--sd-color-tabs-underline-hover:var(--color-foreground-border);--sd-color-tabs-underline-inactive:var(--color-background-border);--sd-color-tabs-overline:var(--color-background-border);--sd-color-tabs-underline:var(--color-background-border)}.sd-tab-content{box-shadow:0 -2px var(--sd-color-tabs-overline),0 1px var(--sd-color-tabs-underline)}.sd-card{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)}.sd-shadow-sm{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-md{box-shadow:0 .3rem .75rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-lg{box-shadow:0 .6rem 1.5rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-card-hover:hover{transform:none}.sd-cards-carousel{gap:.25rem;padding:.25rem}body{--tabs--label-text:var(--color-foreground-muted);--tabs--label-text--hover:var(--color-foreground-muted);--tabs--label-text--active:var(--color-brand-content);--tabs--label-text--active--hover:var(--color-brand-content);--tabs--label-background:transparent;--tabs--label-background--hover:transparent;--tabs--label-background--active:transparent;--tabs--label-background--active--hover:transparent;--tabs--padding-x:0.25em;--tabs--margin-x:1em;--tabs--border:var(--color-background-border);--tabs--label-border:transparent;--tabs--label-border--hover:var(--color-foreground-muted);--tabs--label-border--active:var(--color-brand-content);--tabs--label-border--active--hover:var(--color-brand-content)}[role=main] 
.container{max-width:none;padding-left:0;padding-right:0}.shadow.docutils{border:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)!important}.sphinx-bs .card{background-color:var(--color-background-secondary);color:var(--color-foreground)} +/*# sourceMappingURL=furo-extensions.css.map*/ \ No newline at end of file diff --git a/_static/styles/furo-extensions.css.map b/_static/styles/furo-extensions.css.map new file mode 100644 index 00000000..9ba5637f --- /dev/null +++ b/_static/styles/furo-extensions.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo-extensions.css","mappings":"AAGA,2BACE,oFACA,4CAKE,6CAHA,YACA,eAEA,CACA,kDACE,yCAEF,8CACE,sCAEJ,8CACE,kDAEJ,2BAGE,uBACA,cAHA,gBACA,UAEA,CAGA,yCACE,mBAEF,gDAEE,gDADA,YACA,CACA,sDACE,gDACF,yDACE,sCAEJ,+CACE,UACA,qDACE,UAGF,mDACE,eAEJ,yEAEE,4DAEA,mHASE,mBAPA,kBAEA,YADA,oBAGA,aADA,gBAIA,CAEA,qIAEE,WADA,UACA,CAEJ,uGACE,aAEF,iUAGE,cAEF,mHACE,aC1EJ,gCACE,mCAEF,0BAKE,mBAUA,8CACA,YAFA,mCAKA,eAZA,cALA,UASA,YADA,YAYA,iCAdA,YAcA,CAEA,gCAEE,8CADA,gCACA,CAEF,gCAGE,6BADA,mCADA,YAEA,CAEF,kCAEE,cADA,oBACA,CACA,wCACE,cAEJ,8BACE,UC5CN,KAEE,6CAA8C,CAC9C,uDAAwD,CACxD,uDAAwD,CAGxD,iCAAsC,CAGtC,+CAAgD,CAChD,uDAAwD,CACxD,uDAAwD,CACxD,oDAAqD,CACrD,6DAA8D,CAC9D,6DAA8D,CAG9D,uDAAwD,CACxD,yDAA0D,CAC1D,4DAA6D,CAC7D,2DAA4D,CAC5D,8DAA+D,CAC/D,iEAAkE,CAClE,uDAAwD,CACxD,wDAAyD,CAG3D,gBACE,qFAGF,SACE,6EAEF,cACE,uFAEF,cACE,uFAEF,cACE,uFAGF,qBACE,eAEF,mBACE,WACA,eChDF,KACE,gDAAiD,CACjD,uDAAwD,CACxD,qDAAsD,CACtD,4DAA6D,CAC7D,oCAAqC,CACrC,2CAA4C,CAC5C,4CAA6C,CAC7C,mDAAoD,CACpD,wBAAyB,CACzB,oBAAqB,CACrB,6CAA8C,CAC9C,gCAAiC,CACjC,yDAA0D,CAC1D,uDAAwD,CACxD,8DAA+D,CCbjE,uBACE,eACA,eACA,gBAGF,iBACE,YACA,+EAGF,iBACE,mDACA","sources":["webpack:///./src/furo/assets/styles/extensions/_readthedocs.sass","webpack:///./src/furo/assets/styles/extensions/_copybutton.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-design.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-inline-tabs.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-panels.sass"],"sourcesContent":["// This file contains the styles used for tweaking how ReadTheDoc's embedded\n// contents would show up inside the theme.\n\n#furo-sidebar-ad-placement\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n .ethical-sidebar\n // Remove the border and box-shadow.\n border: none\n box-shadow: none\n // Manage the background colors.\n background: var(--color-background-secondary)\n &:hover\n background: var(--color-background-hover)\n // Ensure the text is legible.\n a\n color: var(--color-foreground-primary)\n\n .ethical-callout a\n color: var(--color-foreground-secondary) !important\n\n#furo-readthedocs-versions\n position: static\n width: 100%\n background: transparent\n display: block\n\n // Make the background color fit with the theme's aesthetic.\n .rst-versions\n background: rgb(26, 28, 30)\n\n .rst-current-version\n cursor: unset\n background: var(--color-sidebar-item-background)\n &:hover\n background: var(--color-sidebar-item-background)\n .fa-book\n color: var(--color-foreground-primary)\n\n > .rst-other-versions\n padding: 0\n small\n opacity: 1\n\n .injected\n .rst-versions\n position: unset\n\n &:hover,\n &:focus-within\n box-shadow: 0 0 0 1px var(--color-sidebar-background-border)\n\n .rst-current-version\n // Undo the tweaks done in RTD's CSS\n font-size: inherit\n line-height: inherit\n height: auto\n text-align: right\n padding: 12px\n\n // Match the rest of the body\n background: #1a1c1e\n\n .fa-book\n float: left\n 
color: white\n\n .fa-caret-down\n display: none\n\n .rst-current-version,\n .rst-other-versions,\n .injected\n display: block\n\n > .rst-current-version\n display: none\n",".highlight\n &:hover button.copybtn\n color: var(--color-code-foreground)\n\n button.copybtn\n // Make it visible\n opacity: 1\n\n // Align things correctly\n align-items: center\n\n height: 1.25em\n width: 1.25em\n\n top: 0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 0.25em\n --tabs--margin-x: 1em\n --tabs--border: 
var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/_static/styles/furo.css b/_static/styles/furo.css new file mode 100644 index 00000000..3d29a218 --- /dev/null +++ b/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{-webkit-text-size-adjust:100%;line-height:1.15}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid var(--color-foreground-border)}a,blockquote,dl,ol,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{clip:rect(0,0,0,0)!important;border:0!important;height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;white-space:nowrap!important;width:1px!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe 
UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,');--icon-pencil:url('data:image/svg+xml;charset=utf-8,');--icon-abstract:url('data:image/svg+xml;charset=utf-8,');--icon-info:url('data:image/svg+xml;charset=utf-8,');--icon-flame:url('data:image/svg+xml;charset=utf-8,');--icon-question:url('data:image/svg+xml;charset=utf-8,');--icon-warning:url('data:image/svg+xml;charset=utf-8,');--icon-failure:url('data:image/svg+xml;charset=utf-8,');--icon-spark:url('data:image/svg+xml;charset=utf-8,');--color-admonition-title--caution:#ff9100;--color-admonition-title-background--caution:rgba(255,145,0,.2);--color-admonition-title--warning:#ff9100;--color-admonition-title-background--warning:rgba(255,145,0,.2);--color-admonition-title--danger:#ff5252;--color-admonition-title-background--danger:rgba(255,82,82,.2);--color-admonition-title--attention:#ff5252;--color-admonition-title-background--attention:rgba(255,82,82,.2);--color-admonition-title--error:#ff5252;--color-admonition-title-background--error:rgba(255,82,82,.2);--color-admonition-title--hint:#00c852;--color-admonition-title-background--hint:rgba(0,200,82,.2);--color-admonition-title--tip:#00c852;--color-admonition-title-background--tip:rgba(0,200,82,.2);--color-admonition-title--important:#00bfa5;--color-admonition-title-background--important:rgba(0,191,165,.2);--color-admonition-title--note:#00b0ff;--color-admonition-title-background--note:rgba(0,176,255,.2);--color-admonition-title--seealso:#448aff;--color-admonition-title-background--seealso:rgba(68,138,255,.2);--color-admonition-title--admonition-todo:grey;--color-admonition-title-background--admonition-todo:hsla(0,0%,50%,.2);--color-admonition-title:#651fff;--color-admonition-title-background:rgba(101,31,255,.2);--icon-admonition-default:var(--icon-abstract);--color-topic-title:#14b8a6;--color-topic-title-background:rgba(20,184,166,.2);--icon-topic-default:var(--icon-pencil);--color-problematic:#b30000;--color-foregrou
nd-primary:#000;--color-foreground-secondary:#5a5c63;--color-foreground-muted:#646776;--color-foreground-border:#878787;--color-background-primary:#fff;--color-background-secondary:#f8f9fb;--color-background-hover:#efeff4;--color-background-hover--transparent:#efeff400;--color-background-border:#eeebee;--color-background-item:#ccc;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2962ff;--color-brand-content:#2a5adf;--color-api-background:var(--color-background-hover--transparent);--color-api-background-hover:var(--color-background-hover);--color-api-overall:var(--color-foreground-secondary);--color-api-name:var(--color-problematic);--color-api-pre-name:var(--color-problematic);--color-api-paren:var(--color-foreground-secondary);--color-api-keyword:var(--color-foreground-primary);--color-highlight-on-target:#ffc;--color-inline-code-background:var(--color-background-secondary);--color-highlighted-background:#def;--color-highlighted-text:var(--color-foreground-primary);--color-guilabel-background:#ddeeff80;--color-guilabel-border:#bedaf580;--color-guilabel-text:var(--color-foreground-primary);--color-admonition-background:transparent;--color-table-header-background:var(--color-background-secondary);--color-table-border:var(--color-background-border);--color-card-border:var(--color-background-secondary);--color-card-background:transparent;--color-card-marginals-background:var(--color-background-secondary);--color-header-background:var(--color-background-primary);--color-header-border:var(--color-background-border);--color-header-text:var(--color-foreground-primary);--color-sidebar-background:var(--color-background-secondary);--color-sidebar-background-border:var(--color-background-border);--color-sidebar-brand-text:var(--color-foreground-primary);--color-sidebar-caption-text:var(--color-foreground-muted);--color-sidebar-link-text:var(--color-foreground-secondary);--color-sidebar-link-text--top-level:var(--color-brand-primary);--color-sidebar-item-background:var(--color-sidebar-background);--color-sidebar-item-background--current:var( --color-sidebar-item-background );--color-sidebar-item-background--hover:linear-gradient(90deg,var(--color-background-hover--transparent) 0%,var(--color-background-hover) var(--sidebar-item-spacing-horizontal),var(--color-background-hover) 100%);--color-sidebar-item-expander-background:transparent;--color-sidebar-item-expander-background--hover:var( --color-background-hover );--color-sidebar-search-text:var(--color-foreground-primary);--color-sidebar-search-background:var(--color-background-secondary);--color-sidebar-search-background--focus:var(--color-background-primary);--color-sidebar-search-border:var(--color-background-border);--color-sidebar-search-icon:var(--color-foreground-muted);--color-toc-background:var(--color-background-primary);--color-toc-title-text:var(--color-foreground-muted);--color-toc-item-text:var(--color-foreground-secondary);--color-toc-item-text--hover:var(--color-foreground-primary);--color-toc-item-text--active:var(--color-brand-primary);--color-content-foreground:var(--color-foreground-primary);--color-content-background:transparent;--color-link:var(--color-brand-content);--color-link--hover:var(--color-brand-content);--color-link-underline:var(--color-background-border);--color-link-underline--hover:var(--color-foreground-border)}.only-light{display:block!important}html body .only-dark{display:none!important}@media not 
print{body[data-theme=dark]{--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body[data-theme=dark] .only-light{display:none!important}body[data-theme=dark] .only-dark{display:block!important}@media(prefers-color-scheme:dark){body:not([data-theme=light]){--color-problematic:#ee5151;--color-foreground-primary:#ffffffcc;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#2b8cee;--color-brand-content:#368ce2;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body:not([data-theme=light]) .only-light{display:none!important}body:not([data-theme=light]) .only-dark{display:block!important}}}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto,body[data-theme=dark] .theme-toggle svg.theme-icon-when-dark,body[data-theme=light] .theme-toggle svg.theme-icon-when-light{display:block}body{font-family:var(--font-stack)}code,kbd,pre,samp{font-family:var(--font-stack--monospace)}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}article{line-height:1.5}h1,h2,h3,h4,h5,h6{border-radius:.5rem;font-weight:700;line-height:1.25;margin:.5rem -.5rem;padding-left:.5rem;padding-right:.5rem}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:0}h1{font-size:2.5em;margin-bottom:1rem}h1,h2{margin-top:1.75rem}h2{font-size:2em}h3{font-size:1.5em}h4{font-size:1.25em}h5{font-size:1.125em}h6{font-size:1em}small{font-size:80%;opacity:75%}p{margin-bottom:.75rem;margin-top:.5rem}hr.docutils{background-color:var(--color-background-border);border:0;height:1px;margin:2rem 0;padding:0}.centered{text-align:center}a{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}a:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link{color:inherit}a.muted-link:hover{color:var(--color-link);text-decoration-color:var(--color-link-underline--hover)}html{overflow-x:hidden;overflow-y:scroll;scroll-behavior:smooth}.sidebar-scroll,.toc-scroll,article[role=main] 
*{scrollbar-color:var(--color-foreground-border) transparent;scrollbar-width:thin}.sidebar-scroll::-webkit-scrollbar,.toc-scroll::-webkit-scrollbar,article[role=main] ::-webkit-scrollbar{height:.25rem;width:.25rem}.sidebar-scroll::-webkit-scrollbar-thumb,.toc-scroll::-webkit-scrollbar-thumb,article[role=main] ::-webkit-scrollbar-thumb{background-color:var(--color-foreground-border);border-radius:.125rem}body,html{background:var(--color-background-primary);color:var(--color-foreground-primary);height:100%}article{background:var(--color-content-background);color:var(--color-content-foreground);overflow-wrap:break-word}.page{display:flex;min-height:100%}.mobile-header{background-color:var(--color-header-background);border-bottom:1px solid var(--color-header-border);color:var(--color-header-text);display:none;height:var(--header-height);width:100%;z-index:10}.mobile-header.scrolled{border-bottom:none;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.mobile-header .header-center a{color:var(--color-header-text);text-decoration:none}.main{display:flex;flex:1}.sidebar-drawer{background:var(--color-sidebar-background);border-right:1px solid var(--color-sidebar-background-border);box-sizing:border-box;display:flex;justify-content:flex-end;min-width:15em;width:calc(50% - 26em)}.sidebar-container,.toc-drawer{box-sizing:border-box;width:15em}.toc-drawer{background:var(--color-toc-background);padding-right:1rem}.sidebar-sticky,.toc-sticky{display:flex;flex-direction:column;height:min(100%,100vh);height:100vh;position:sticky;top:0}.sidebar-scroll,.toc-scroll{flex-grow:1;flex-shrink:1;overflow:auto;scroll-behavior:smooth}.content{display:flex;flex-direction:column;justify-content:space-between;padding:0 3em;width:46em}.icon{display:inline-block;height:1rem;width:1rem}.icon svg{height:100%;width:100%}.announcement{align-items:center;background-color:var(--color-announcement-background);color:var(--color-announcement-text);display:flex;height:var(--header-height);overflow-x:auto}.announcement+.page{min-height:calc(100% - var(--header-height))}.announcement-content{box-sizing:border-box;min-width:100%;padding:.5rem;text-align:center;white-space:nowrap}.announcement-content a{color:var(--color-announcement-text);text-decoration-color:var(--color-announcement-text)}.announcement-content a:hover{color:var(--color-announcement-text);text-decoration-color:var(--color-link--hover)}.no-js .theme-toggle-container{display:none}.theme-toggle-container{vertical-align:middle}.theme-toggle{background:transparent;border:none;cursor:pointer;padding:0}.theme-toggle svg{color:var(--color-foreground-primary);display:none;height:1rem;vertical-align:middle;width:1rem}.theme-toggle-header{float:left;padding:1rem .5rem}.nav-overlay-icon,.toc-overlay-icon{cursor:pointer;display:none}.nav-overlay-icon .icon,.toc-overlay-icon .icon{color:var(--color-foreground-secondary);height:1rem;width:1rem}.nav-overlay-icon,.toc-header-icon{align-items:center;justify-content:center}.toc-content-icon{height:1.5rem;width:1.5rem}.content-icon-container{display:flex;float:right;gap:.5rem;margin-bottom:1rem;margin-left:1rem;margin-top:1.5rem}.content-icon-container .edit-this-page svg{color:inherit;height:1rem;width:1rem}.sidebar-toggle{display:none;position:absolute}.sidebar-toggle[name=__toc]{left:20px}.sidebar-toggle:checked{left:40px}.overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0ms,height 0ms,opacity .25s 
ease-out;width:0}.sidebar-overlay{z-index:20}.toc-overlay{z-index:40}.sidebar-drawer{transition:left .25s ease-in-out;z-index:30}.toc-drawer{transition:right .25s ease-in-out;z-index:50}#__navigation:checked~.sidebar-overlay{height:100%;opacity:1;width:100%}#__navigation:checked~.page .sidebar-drawer{left:0;top:0}#__toc:checked~.toc-overlay{height:100%;opacity:1;width:100%}#__toc:checked~.page .toc-drawer{right:0;top:0}.back-to-top{background:var(--color-background-primary);border-radius:1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 1px 0 hsla(220,9%,46%,.502);display:none;font-size:.8125rem;left:0;margin-left:50%;padding:.5rem .75rem .5rem .5rem;position:fixed;text-decoration:none;top:1rem;transform:translateX(-50%);z-index:10}.back-to-top svg{fill:currentColor;display:inline-block;height:1rem;width:1rem}.back-to-top span{margin-left:.25rem}.show-back-to-top .back-to-top{align-items:center;display:flex}@media(min-width:97em){html{font-size:110%}}@media(max-width:82em){.toc-content-icon{display:flex}.toc-drawer{border-left:1px solid var(--color-background-muted);height:100vh;position:fixed;right:-15em;top:0}.toc-tree{border-left:none;font-size:var(--toc-font-size--mobile)}.sidebar-drawer{width:calc(50% - 18.5em)}}@media(max-width:67em){.nav-overlay-icon{display:flex}.sidebar-drawer{height:100vh;left:-15em;position:fixed;top:0;width:15em}.toc-header-icon{display:flex}.theme-toggle-content,.toc-content-icon{display:none}.theme-toggle-header{display:block}.mobile-header{align-items:center;display:flex;justify-content:space-between;position:sticky;top:0}.mobile-header .header-left,.mobile-header .header-right{display:flex;height:var(--header-height);padding:0 var(--header-padding)}.mobile-header .header-left label,.mobile-header .header-right label{height:100%;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:100%}.nav-overlay-icon .icon,.theme-toggle svg{height:1.25rem;width:1.25rem}:target{scroll-margin-top:var(--header-height)}.back-to-top{top:calc(var(--header-height) + .5rem)}.page{flex-direction:column;justify-content:center}.content{margin-left:auto;margin-right:auto}}@media(max-width:52em){.content{overflow-x:auto;width:100%}}@media(max-width:46em){.content{padding:0 1em}article aside.sidebar{float:none;margin:1rem 0;width:100%}}.admonition,.topic{background:var(--color-admonition-background);border-radius:.2rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1);font-size:var(--admonition-font-size);margin:1rem auto;overflow:hidden;padding:0 .5rem .5rem;page-break-inside:avoid}.admonition>:nth-child(2),.topic>:nth-child(2){margin-top:0}.admonition>:last-child,.topic>:last-child{margin-bottom:0}.admonition p.admonition-title,p.topic-title{font-size:var(--admonition-title-font-size);font-weight:500;line-height:1.3;margin:0 -.5rem .5rem;padding:.4rem .5rem .4rem 2rem;position:relative}.admonition 
p.admonition-title:before,p.topic-title:before{content:"";height:1rem;left:.5rem;position:absolute;width:1rem}p.admonition-title{background-color:var(--color-admonition-title-background)}p.admonition-title:before{background-color:var(--color-admonition-title);-webkit-mask-image:var(--icon-admonition-default);mask-image:var(--icon-admonition-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}p.topic-title{background-color:var(--color-topic-title-background)}p.topic-title:before{background-color:var(--color-topic-title);-webkit-mask-image:var(--icon-topic-default);mask-image:var(--icon-topic-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}.admonition{border-left:.2rem solid var(--color-admonition-title)}.admonition.caution{border-left-color:var(--color-admonition-title--caution)}.admonition.caution>.admonition-title{background-color:var(--color-admonition-title-background--caution)}.admonition.caution>.admonition-title:before{background-color:var(--color-admonition-title--caution);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.warning{border-left-color:var(--color-admonition-title--warning)}.admonition.warning>.admonition-title{background-color:var(--color-admonition-title-background--warning)}.admonition.warning>.admonition-title:before{background-color:var(--color-admonition-title--warning);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.danger{border-left-color:var(--color-admonition-title--danger)}.admonition.danger>.admonition-title{background-color:var(--color-admonition-title-background--danger)}.admonition.danger>.admonition-title:before{background-color:var(--color-admonition-title--danger);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.attention{border-left-color:var(--color-admonition-title--attention)}.admonition.attention>.admonition-title{background-color:var(--color-admonition-title-background--attention)}.admonition.attention>.admonition-title:before{background-color:var(--color-admonition-title--attention);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.error{border-left-color:var(--color-admonition-title--error)}.admonition.error>.admonition-title{background-color:var(--color-admonition-title-background--error)}.admonition.error>.admonition-title:before{background-color:var(--color-admonition-title--error);-webkit-mask-image:var(--icon-failure);mask-image:var(--icon-failure)}.admonition.hint{border-left-color:var(--color-admonition-title--hint)}.admonition.hint>.admonition-title{background-color:var(--color-admonition-title-background--hint)}.admonition.hint>.admonition-title:before{background-color:var(--color-admonition-title--hint);-webkit-mask-image:var(--icon-question);mask-image:var(--icon-question)}.admonition.tip{border-left-color:var(--color-admonition-title--tip)}.admonition.tip>.admonition-title{background-color:var(--color-admonition-title-background--tip)}.admonition.tip>.admonition-title:before{background-color:var(--color-admonition-title--tip);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.important{border-left-color:var(--color-admonition-title--important)}.admonition.important>.admonition-title{background-color:var(--color-admonition-title-background--important)}.admonition.important>.admonition-title:before{background-color:var(--color-admonition-title--important);-webkit-mask-image:var(--icon-flame);mask-image:var(--icon-flame)}.admonition.note{border-left-color:var(--color-ad
monition-title--note)}.admonition.note>.admonition-title{background-color:var(--color-admonition-title-background--note)}.admonition.note>.admonition-title:before{background-color:var(--color-admonition-title--note);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition.seealso{border-left-color:var(--color-admonition-title--seealso)}.admonition.seealso>.admonition-title{background-color:var(--color-admonition-title-background--seealso)}.admonition.seealso>.admonition-title:before{background-color:var(--color-admonition-title--seealso);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.admonition-todo{border-left-color:var(--color-admonition-title--admonition-todo)}.admonition.admonition-todo>.admonition-title{background-color:var(--color-admonition-title-background--admonition-todo)}.admonition.admonition-todo>.admonition-title:before{background-color:var(--color-admonition-title--admonition-todo);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition-todo>.admonition-title{text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:first-child{margin-top:.125rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:last-child{margin-bottom:.75rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list>dt{font-size:var(--font-size--small);text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd:empty{margin-bottom:.5rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul{margin-left:-1.2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p:nth-child(2){margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p+p:last-child:empty{margin-bottom:0;margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{color:var(--color-api-overall)}.sig:not(.sig-inline){background:var(--color-api-background);border-radius:.25rem;font-family:var(--font-stack--monospace);font-size:var(--api-font-size);font-weight:700;margin-left:-.25rem;margin-right:-.25rem;padding:.25rem .5rem .25rem 3em;text-indent:-2.5em;transition:background .1s ease-out}.sig:not(.sig-inline):hover{background:var(--color-api-background-hover)}.sig:not(.sig-inline) a.reference .viewcode-link{font-weight:400;width:3.5rem}em.property{font-style:normal}em.property:first-child{color:var(--color-api-keyword)}.sig-name{color:var(--color-api-name)}.sig-prename{color:var(--color-api-pre-name);font-weight:400}.sig-paren{color:var(--color-api-paren)}.sig-param{font-style:normal}.versionmodified{font-style:italic}div.deprecated p,div.versionadded p,div.versionchanged p{margin-bottom:.125rem;margin-top:.125rem}.viewcode-back,.viewcode-link{float:right;text-align:right}.line-block{margin-bottom:.75rem;margin-top:.5rem}.line-block .line-block{margin-bottom:0;margin-top:0;padding-left:1rem}.code-block-caption,article p.caption,table>caption{font-size:var(--font-size--small);text-align:center}.toctree-wrapper.compound 
.caption,.toctree-wrapper.compound :not(.caption)>.caption-text{font-size:var(--font-size--small);margin-bottom:0;text-align:initial;text-transform:uppercase}.toctree-wrapper.compound>ul{margin-bottom:0;margin-top:0}.sig-inline,code.literal{background:var(--color-inline-code-background);border-radius:.2em;font-size:var(--font-size--small--2);padding:.1em .2em}pre.literal-block .sig-inline,pre.literal-block code.literal{font-size:inherit;padding:0}p .sig-inline,p code.literal{border:1px solid var(--color-background-border)}.sig-inline{font-family:var(--font-stack--monospace)}div[class*=" highlight-"],div[class^=highlight-]{display:flex;margin:1em 0}div[class*=" highlight-"] .table-wrapper,div[class^=highlight-] .table-wrapper,pre{margin:0;padding:0}pre{overflow:auto}article[role=main] .highlight pre{line-height:1.5}.highlight pre,pre.literal-block{font-size:var(--code-font-size);padding:.625rem .875rem}pre.literal-block{background-color:var(--color-code-background);border-radius:.2rem;color:var(--color-code-foreground);margin-bottom:1rem;margin-top:1rem}.highlight{border-radius:.2rem;width:100%}.highlight .gp,.highlight span.linenos{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlight .hll{display:block;margin-left:-.875rem;margin-right:-.875rem;padding-left:.875rem;padding-right:.875rem}.code-block-caption{background-color:var(--color-code-background);border-bottom:1px solid;border-radius:.25rem;border-bottom-left-radius:0;border-bottom-right-radius:0;border-color:var(--color-background-border);color:var(--color-code-foreground);display:flex;font-weight:300;padding:.625rem .875rem}.code-block-caption+div[class]{margin-top:0}.code-block-caption+div[class] pre{border-top-left-radius:0;border-top-right-radius:0}.highlighttable{display:block;width:100%}.highlighttable tbody{display:block}.highlighttable tr{display:flex}.highlighttable td.linenos{background-color:var(--color-code-background);border-bottom-left-radius:.2rem;border-top-left-radius:.2rem;color:var(--color-code-foreground);padding:.625rem 0 .625rem .875rem}.highlighttable .linenodiv{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;font-size:var(--code-font-size);padding-right:.875rem}.highlighttable td.code{display:block;flex:1;overflow:hidden;padding:0}.highlighttable td.code .highlight{border-bottom-left-radius:0;border-top-left-radius:0}.highlight span.linenos{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;display:inline-block;margin-right:.875rem;padding-left:0;padding-right:.875rem}.footnote-reference{font-size:var(--font-size--small--4);vertical-align:super}dl.footnote.brackets{color:var(--color-foreground-secondary);display:grid;font-size:var(--font-size--small);grid-template-columns:max-content auto}dl.footnote.brackets dt{margin:0}dl.footnote.brackets dt>.fn-backref{margin-left:.25rem}dl.footnote.brackets dt:after{content:":"}dl.footnote.brackets dt .brackets:before{content:"["}dl.footnote.brackets dt .brackets:after{content:"]"}dl.footnote.brackets dd{margin:0;padding:0 1rem}aside.footnote{color:var(--color-foreground-secondary);font-size:var(--font-size--small)}aside.footnote>span,div.citation>span{float:left;font-weight:500;padding-right:.25rem}aside.footnote>p,div.citation>p{margin-left:2rem}img{box-sizing:border-box;height:auto;max-width:100%}article .figure,article figure{border-radius:.2rem;margin:0}article .figure :last-child,article figure :last-child{margin-bottom:0}article .align-left{clear:left;float:left;margin:0 1rem 1rem}article 
.align-right{clear:right;float:right;margin:0 1rem 1rem}article .align-center,article .align-default{display:block;margin-left:auto;margin-right:auto;text-align:center}article table.align-default{display:table;text-align:initial}.domainindex-jumpbox,.genindex-jumpbox{border-bottom:1px solid var(--color-background-border);border-top:1px solid var(--color-background-border);padding:.25rem}.domainindex-section h2,.genindex-section h2{margin-bottom:.5rem;margin-top:.75rem}.domainindex-section ul,.genindex-section ul{margin-bottom:0;margin-top:0}ol,ul{margin-bottom:1rem;margin-top:1rem;padding-left:1.2rem}ol li>p:first-child,ul li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}ol li>p:last-child,ul li>p:last-child{margin-top:.25rem}ol li>ol,ol li>ul,ul li>ol,ul li>ul{margin-bottom:.5rem;margin-top:.5rem}ol.arabic{list-style:decimal}ol.loweralpha{list-style:lower-alpha}ol.upperalpha{list-style:upper-alpha}ol.lowerroman{list-style:lower-roman}ol.upperroman{list-style:upper-roman}.simple li>ol,.simple li>ul,.toctree-wrapper li>ol,.toctree-wrapper li>ul{margin-bottom:0;margin-top:0}.field-list dt,.option-list dt,dl.footnote dt,dl.glossary dt,dl.simple dt,dl:not([class]) dt{font-weight:500;margin-top:.25rem}.field-list dt+dt,.option-list dt+dt,dl.footnote dt+dt,dl.glossary dt+dt,dl.simple dt+dt,dl:not([class]) dt+dt{margin-top:0}.field-list dt .classifier:before,.option-list dt .classifier:before,dl.footnote dt .classifier:before,dl.glossary dt .classifier:before,dl.simple dt .classifier:before,dl:not([class]) dt .classifier:before{content:":";margin-left:.2rem;margin-right:.2rem}.field-list dd ul,.field-list dd>p:first-child,.option-list dd ul,.option-list dd>p:first-child,dl.footnote dd ul,dl.footnote dd>p:first-child,dl.glossary dd ul,dl.glossary dd>p:first-child,dl.simple dd ul,dl.simple dd>p:first-child,dl:not([class]) dd ul,dl:not([class]) dd>p:first-child{margin-top:.125rem}.field-list dd ul,.option-list dd ul,dl.footnote dd ul,dl.glossary dd ul,dl.simple dd ul,dl:not([class]) dd ul{margin-bottom:.125rem}.math-wrapper{overflow-x:auto;width:100%}div.math{position:relative;text-align:center}div.math .headerlink,div.math:focus .headerlink{display:none}div.math:hover .headerlink{display:inline-block}div.math span.eqno{position:absolute;right:.5rem;top:50%;transform:translateY(-50%);z-index:1}abbr[title]{cursor:help}.problematic{color:var(--color-problematic)}kbd:not(.compound){background-color:var(--color-background-secondary);border:1px solid var(--color-foreground-border);border-radius:.2rem;box-shadow:0 .0625rem 0 rgba(0,0,0,.2),inset 0 0 0 .125rem var(--color-background-primary);color:var(--color-foreground-primary);display:inline-block;font-size:var(--font-size--small--3);margin:0 .2rem;padding:0 .2rem;vertical-align:text-bottom}blockquote{background:var(--color-background-secondary);border-left:4px solid var(--color-background-border);margin-left:0;margin-right:0;padding:.5rem 1rem}blockquote .attribution{font-weight:600;text-align:right}blockquote.highlights,blockquote.pull-quote{font-size:1.25em}blockquote.epigraph,blockquote.pull-quote{border-left-width:0;border-radius:.5rem}blockquote.highlights{background:transparent;border-left-width:0}p .reference img{vertical-align:middle}p.rubric{font-size:1.125em;font-weight:700;line-height:1.25}dd p.rubric{font-size:var(--font-size--small);font-weight:inherit;line-height:inherit;text-transform:uppercase}article .sidebar{background-color:var(--color-background-secondary);border:1px solid 
var(--color-background-border);border-radius:.2rem;clear:right;float:right;margin-left:1rem;margin-right:0;width:30%}article .sidebar>*{padding-left:1rem;padding-right:1rem}article .sidebar>ol,article .sidebar>ul{padding-left:2.2rem}article .sidebar .sidebar-title{border-bottom:1px solid var(--color-background-border);font-weight:500;margin:0;padding:.5rem 1rem}.table-wrapper{margin-bottom:.5rem;margin-top:1rem;overflow-x:auto;padding:.2rem .2rem .75rem;width:100%}table.docutils{border-collapse:collapse;border-radius:.2rem;border-spacing:0;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)}table.docutils th{background:var(--color-table-header-background)}table.docutils td,table.docutils th{border-bottom:1px solid var(--color-table-border);border-left:1px solid var(--color-table-border);border-right:1px solid var(--color-table-border);padding:0 .25rem}table.docutils td p,table.docutils th p{margin:.25rem}table.docutils td:first-child,table.docutils th:first-child{border-left:none}table.docutils td:last-child,table.docutils th:last-child{border-right:none}table.docutils td.text-left,table.docutils th.text-left{text-align:left}table.docutils td.text-right,table.docutils th.text-right{text-align:right}table.docutils td.text-center,table.docutils th.text-center{text-align:center}:target{scroll-margin-top:.5rem}@media(max-width:67em){:target{scroll-margin-top:calc(.5rem + var(--header-height))}section>span:target{scroll-margin-top:calc(.8rem + var(--header-height))}}.headerlink{font-weight:100;-webkit-user-select:none;-moz-user-select:none;user-select:none}.code-block-caption>.headerlink,dl dt>.headerlink,figcaption p>.headerlink,h1>.headerlink,h2>.headerlink,h3>.headerlink,h4>.headerlink,h5>.headerlink,h6>.headerlink,p.caption>.headerlink,table>caption>.headerlink{margin-left:.5rem;visibility:hidden}.code-block-caption:hover>.headerlink,dl dt:hover>.headerlink,figcaption p:hover>.headerlink,h1:hover>.headerlink,h2:hover>.headerlink,h3:hover>.headerlink,h4:hover>.headerlink,h5:hover>.headerlink,h6:hover>.headerlink,p.caption:hover>.headerlink,table>caption:hover>.headerlink{visibility:visible}.code-block-caption>.toc-backref,dl dt>.toc-backref,figcaption p>.toc-backref,h1>.toc-backref,h2>.toc-backref,h3>.toc-backref,h4>.toc-backref,h5>.toc-backref,h6>.toc-backref,p.caption>.toc-backref,table>caption>.toc-backref{color:inherit;text-decoration-line:none}figure:hover>figcaption>p>.headerlink,table:hover>caption>.headerlink{visibility:visible}:target>h1:first-of-type,:target>h2:first-of-type,:target>h3:first-of-type,:target>h4:first-of-type,:target>h5:first-of-type,:target>h6:first-of-type,span:target~h1:first-of-type,span:target~h2:first-of-type,span:target~h3:first-of-type,span:target~h4:first-of-type,span:target~h5:first-of-type,span:target~h6:first-of-type{background-color:var(--color-highlight-on-target)}:target>h1:first-of-type code.literal,:target>h2:first-of-type code.literal,:target>h3:first-of-type code.literal,:target>h4:first-of-type code.literal,:target>h5:first-of-type code.literal,:target>h6:first-of-type code.literal,span:target~h1:first-of-type code.literal,span:target~h2:first-of-type code.literal,span:target~h3:first-of-type code.literal,span:target~h4:first-of-type code.literal,span:target~h5:first-of-type code.literal,span:target~h6:first-of-type code.literal{background-color:transparent}.literal-block-wrapper:target .code-block-caption,.this-will-duplicate-information-and-it-is-still-useful-here li 
:target,figure:target,table:target>caption{background-color:var(--color-highlight-on-target)}dt:target{background-color:var(--color-highlight-on-target)!important}.footnote-reference:target,.footnote>dt:target+dd{background-color:var(--color-highlight-on-target)}.guilabel{background-color:var(--color-guilabel-background);border:1px solid var(--color-guilabel-border);border-radius:.5em;color:var(--color-guilabel-text);font-size:.9em;padding:0 .3em}footer{display:flex;flex-direction:column;font-size:var(--font-size--small);margin-top:2rem}.bottom-of-page{align-items:center;border-top:1px solid var(--color-background-border);color:var(--color-foreground-secondary);display:flex;justify-content:space-between;line-height:1.5;margin-top:1rem;padding-bottom:1rem;padding-top:1rem}@media(max-width:46em){.bottom-of-page{flex-direction:column-reverse;gap:.25rem;text-align:center}}.bottom-of-page .left-details{font-size:var(--font-size--small)}.bottom-of-page .right-details{display:flex;flex-direction:column;gap:.25rem;text-align:right}.bottom-of-page .icons{display:flex;font-size:1rem;gap:.25rem;justify-content:flex-end}.bottom-of-page .icons a{text-decoration:none}.bottom-of-page .icons img,.bottom-of-page .icons svg{font-size:1.125rem;height:1em;width:1em}.related-pages a{align-items:center;display:flex;text-decoration:none}.related-pages a:hover .page-info .title{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}.related-pages a svg.furo-related-icon,.related-pages a svg.furo-related-icon>use{color:var(--color-foreground-border);flex-shrink:0;height:.75rem;margin:0 .5rem;width:.75rem}.related-pages a.next-page{clear:right;float:right;max-width:50%;text-align:right}.related-pages a.prev-page{clear:left;float:left;max-width:50%}.related-pages a.prev-page svg{transform:rotate(180deg)}.page-info{display:flex;flex-direction:column;overflow-wrap:anywhere}.next-page .page-info{align-items:flex-end}.page-info .context{align-items:center;color:var(--color-foreground-muted);display:flex;font-size:var(--font-size--small);padding-bottom:.1rem;text-decoration:none}ul.search{list-style:none;padding-left:0}ul.search li{border-bottom:1px solid var(--color-background-border);padding:1rem 0}[role=main] .highlighted{background-color:var(--color-highlighted-background);color:var(--color-highlighted-text)}.sidebar-brand{display:flex;flex-direction:column;flex-shrink:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none}.sidebar-brand-text{color:var(--color-sidebar-brand-text);font-size:1.5rem;overflow-wrap:break-word}.sidebar-brand-text,.sidebar-logo-container{margin:var(--sidebar-item-spacing-vertical) 0}.sidebar-logo{display:block;margin:0 auto;max-width:100%}.sidebar-search-container{align-items:center;background:var(--color-sidebar-search-background);display:flex;margin-top:var(--sidebar-search-space-above);position:relative}.sidebar-search-container:focus-within,.sidebar-search-container:hover{background:var(--color-sidebar-search-background--focus)}.sidebar-search-container:before{background-color:var(--color-sidebar-search-icon);content:"";height:var(--sidebar-search-icon-size);left:var(--sidebar-item-spacing-horizontal);-webkit-mask-image:var(--icon-search);mask-image:var(--icon-search);position:absolute;width:var(--sidebar-search-icon-size)}.sidebar-search{background:transparent;border:none;border-bottom:1px solid var(--color-sidebar-search-border);border-top:1px solid 
var(--color-sidebar-search-border);box-sizing:border-box;color:var(--color-sidebar-search-foreground);padding:var(--sidebar-search-input-spacing-vertical) var(--sidebar-search-input-spacing-horizontal) var(--sidebar-search-input-spacing-vertical) calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size));width:100%;z-index:10}.sidebar-search:focus{outline:none}.sidebar-search::-moz-placeholder{font-size:var(--sidebar-search-input-font-size)}.sidebar-search::placeholder{font-size:var(--sidebar-search-input-font-size)}#searchbox .highlight-link{margin:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0;text-align:center}#searchbox .highlight-link a{color:var(--color-sidebar-search-icon);font-size:var(--font-size--small--2)}.sidebar-tree{font-size:var(--sidebar-item-font-size);margin-bottom:var(--sidebar-item-spacing-vertical);margin-top:var(--sidebar-tree-space-above)}.sidebar-tree ul{display:flex;flex-direction:column;list-style:none;margin-bottom:0;margin-top:0;padding:0}.sidebar-tree li{margin:0;position:relative}.sidebar-tree li>ul{margin-left:var(--sidebar-item-spacing-horizontal)}.sidebar-tree .icon,.sidebar-tree .reference{color:var(--color-sidebar-link-text)}.sidebar-tree .reference{box-sizing:border-box;display:inline-block;height:100%;line-height:var(--sidebar-item-line-height);overflow-wrap:anywhere;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none;width:100%}.sidebar-tree .reference:hover{background:var(--color-sidebar-item-background--hover)}.sidebar-tree .reference.external:after{color:var(--color-sidebar-link-text);content:url("data:image/svg+xml;charset=utf-8,%3Csvg width='12' height='12' xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' stroke-width='1.5' stroke='%23607D8B' fill='none' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M0 0h24v24H0z' stroke='none'/%3E%3Cpath d='M11 7H6a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h9a2 2 0 0 0 2-2v-5M10 14 20 4M15 4h5v5'/%3E%3C/svg%3E");margin:0 .25rem;vertical-align:middle}.sidebar-tree .current-page>.reference{font-weight:700}.sidebar-tree label{align-items:center;cursor:pointer;display:flex;height:var(--sidebar-item-height);justify-content:center;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:var(--sidebar-expander-width)}.sidebar-tree .caption,.sidebar-tree :not(.caption)>.caption-text{color:var(--color-sidebar-caption-text);font-size:var(--sidebar-caption-font-size);font-weight:700;margin:var(--sidebar-caption-space-above) 0 0 0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-transform:uppercase}.sidebar-tree li.has-children>.reference{padding-right:var(--sidebar-expander-width)}.sidebar-tree .toctree-l1>.reference,.sidebar-tree .toctree-l1>label .icon{color:var(--color-sidebar-link-text--top-level)}.sidebar-tree label{background:var(--color-sidebar-item-expander-background)}.sidebar-tree label:hover{background:var(--color-sidebar-item-expander-background--hover)}.sidebar-tree .current>.reference{background:var(--color-sidebar-item-background--current)}.sidebar-tree .current>.reference:hover{background:var(--color-sidebar-item-background--hover)}.toctree-checkbox{display:none;position:absolute}.toctree-checkbox~ul{display:none}.toctree-checkbox~label .icon svg{transform:rotate(90deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label .icon 
svg{transform:rotate(-90deg)}.toc-title-container{padding:var(--toc-title-padding);padding-top:var(--toc-spacing-vertical)}.toc-title{color:var(--color-toc-title-text);font-size:var(--toc-title-font-size);padding-left:var(--toc-spacing-horizontal);text-transform:uppercase}.no-toc{display:none}.toc-tree-container{padding-bottom:var(--toc-spacing-vertical)}.toc-tree{border-left:1px solid var(--color-background-border);font-size:var(--toc-font-size);line-height:1.3;padding-left:calc(var(--toc-spacing-horizontal) - var(--toc-item-spacing-horizontal))}.toc-tree>ul>li:first-child{padding-top:0}.toc-tree>ul>li:first-child>ul{padding-left:0}.toc-tree>ul>li:first-child>a{display:none}.toc-tree ul{list-style-type:none;margin-bottom:0;margin-top:0;padding-left:var(--toc-item-spacing-horizontal)}.toc-tree li{padding-top:var(--toc-item-spacing-vertical)}.toc-tree li.scroll-current>.reference{color:var(--color-toc-item-text--active);font-weight:700}.toc-tree .reference{color:var(--color-toc-item-text);overflow-wrap:anywhere;text-decoration:none}.toc-scroll{max-height:100vh;overflow-y:scroll}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here){background:rgba(255,0,0,.25);color:var(--color-problematic)}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here):before{content:"ERROR: Adding a table of contents in Furo-based documentation is unnecessary, and does not work well with existing styling.Add a 'this-will-duplicate-information-and-it-is-still-useful-here' class, if you want an escape hatch."}.text-align\:left>p{text-align:left}.text-align\:center>p{text-align:center}.text-align\:right>p{text-align:right} +/*# sourceMappingURL=furo.css.map*/ \ No newline at end of file diff --git a/_static/styles/furo.css.map b/_static/styles/furo.css.map new file mode 100644 index 00000000..d1dfb109 --- /dev/null +++ b/_static/styles/furo.css.map @@ -0,0 +1 @@ 
+{"version":3,"file":"styles/furo.css","mappings":"AAAA,2EAA2E,CAU3E,KAEE,6BAA8B,CAD9B,gBAEF,CASA,KACE,QACF,CAMA,KACE,aACF,CAOA,GACE,aAAc,CACd,cACF,CAUA,GACE,sBAAuB,CACvB,QAAS,CACT,gBACF,CAOA,IACE,+BAAiC,CACjC,aACF,CASA,EACE,4BACF,CAOA,YACE,kBAAmB,CACnB,yBAA0B,CAC1B,gCACF,CAMA,SAEE,kBACF,CAOA,cAGE,+BAAiC,CACjC,aACF,CAeA,QAEE,aAAc,CACd,aAAc,CACd,iBAAkB,CAClB,uBACF,CAEA,IACE,aACF,CAEA,IACE,SACF,CASA,IACE,iBACF,CAUA,sCAKE,mBAAoB,CACpB,cAAe,CACf,gBAAiB,CACjB,QACF,CAOA,aAEE,gBACF,CAOA,cAEE,mBACF,CAMA,gDAIE,yBACF,CAMA,wHAIE,iBAAkB,CAClB,SACF,CAMA,4GAIE,6BACF,CAMA,SACE,0BACF,CASA,OACE,qBAAsB,CACtB,aAAc,CACd,aAAc,CACd,cAAe,CACf,SAAU,CACV,kBACF,CAMA,SACE,uBACF,CAMA,SACE,aACF,CAOA,6BAEE,qBAAsB,CACtB,SACF,CAMA,kFAEE,WACF,CAOA,cACE,4BAA6B,CAC7B,mBACF,CAMA,yCACE,uBACF,CAOA,6BACE,yBAA0B,CAC1B,YACF,CASA,QACE,aACF,CAMA,QACE,iBACF,CAiBA,kBACE,YACF,CCvVA,aAcE,kEACE,uBAOF,WACE,iDAMF,gCACE,wBAEF,qCAEE,uBADA,uBACA,CAEF,SACE,wBAtBA,CCpBJ,iBAOE,6BAEA,mBANA,qBAEA,sBACA,0BAFA,oBAHA,4BAOA,6BANA,mBAOA,CAEF,gBACE,aCPF,KCGE,mHAEA,wGAGA,wBAAyB,CACzB,wBAAyB,CACzB,4BAA6B,CAC7B,yBAA0B,CAC1B,2BAA4B,CAG5B,sDAAuD,CACvD,gDAAiD,CACjD,wDAAyD,CAGzD,0CAA2C,CAC3C,gDAAiD,CACjD,gDAAiD,CAKjD,gCAAiC,CACjC,sCAAuC,CAGvC,2CAA4C,CAG5C,uCAAwC,CChCxC,+FAGA,uBAAwB,CAGxB,iCAAkC,CAClC,kCAAmC,CAEnC,+BAAgC,CAChC,sCAAuC,CACvC,sCAAuC,CACvC,qGAIA,mDAAoD,CAEpD,mCAAoC,CACpC,8CAA+C,CAC/C,gDAAiD,CACjD,kCAAmC,CACnC,6DAA8D,CAG9D,6BAA8B,CAC9B,6BAA8B,CAC9B,+BAAgC,CAChC,kCAAmC,CACnC,kCAAmC,CCPjC,ukBCYA,srCAZF,kaCVA,mLAOA,oTAWA,2UAaA,0CACA,gEACA,0CAGA,gEAUA,yCACA,+DAGA,4CACA,CACA,iEAGA,sGACA,uCACA,4DAGA,sCACA,2DAEA,4CACA,kEACA,oGACA,CAEA,0GACA,+CAGA,+MAOA,+EACA,wCAIA,4DACA,sEACA,kEACA,sEACA,gDAGA,+DACA,0CACA,gEACA,gGACA,CAGA,2DACA,qDAGA,0CACA,8CACA,oDACA,oDL7GF,iCAEA,iEAME,oCKyGA,yDAIA,sCACA,kCACA,sDAGA,0CACA,kEACA,oDAEA,sDAGA,oCACA,oEAIA,CAGA,yDAGA,qDACA,oDAGA,6DAIA,iEAGA,2DAEA,2DL9IE,4DAEA,gEAIF,gEKgGA,gFAIA,oNAOA,qDAEA,gFAIA,4DAIA,oEAMA,yEAIA,6DACA,0DAGA,uDAGA,qDAEA,wDLpII,6DAEA,yDACE,2DAMN,uCAIA,yCACE,8CAGF,sDMjDA,6DAKA,oCAIA,4CACA,kBAGF,sBAMA,2BAME,qCAGA,qCAEA,iCAEA,+BAEA,mCAEA,qCAIA,CACA,gCACA,gDAKA,kCAIA,6BAEA,0CAQA,kCAIF,8BAGE,8BACA,uCAGF,sCAKE,kCAEA,sDAGA,iCACE,CACA,2FAGA,gCACE,CACA,+DCzEJ,wCAEA,sBAEF,yDAEE,mCACA,wDAGA,2GAGA,wIACE,gDAMJ,kCAGE,6BACA,0CAGA,gEACA,8BACA,uCAKA,sCAIA,kCACA,sDACA,iCACA,sCAOA,sDAKE,gGAIE,+CAGN,sBAEE,yCAMA,0BAMA,yLAMA,aACA,MAEF,6BACE,2DAIF,wCAIE,kCAGA,SACA,kCAKA,mBAGA,CAJA,eACA,CAHF,gBAEE,CAWA,mBACA,mBACA,mDAGA,YACA,CACA,kBACA,CAEE,kBAKJ,OAPE,kBAQA,CADF,GACE,iCACA,wCAEA,wBACA,aACA,CAFA,WAEA,GACA,oBACA,CAFA,gBAEA,aACE,+CAIF,UAJE,kCAIF,WACA,iBACA,GAGA,uBACE,CAJF,yBAGA,CACE,iDACA,uCAEA,yDACE,cACA,wDAKN,yDAIE,uBAEF,kBACE,uBAEA,kDAIA,0DAGA,CAHA,oBAGA,0GAYA,aAEA,CAHA,YAGA,4HAKF,+CAGE,sBAEF,WAKE,0CAEA,CALA,qCAGA,CAJA,WAOA,SAIA,2CAJA,qCAIA,CACE,wBACA,OACA,YAEJ,gBACE,gBAIA,+CAKF,CAGE,kDAGA,CANF,8BAGE,CAGA,YAEA,CAdF,2BACE,CAHA,UAEF,CAYE,UAEA,CACA,0CACF,iEAOE,iCACA,8BAGA,wCAIA,wBAKE,0CAKF,CARE,6DAGA,CALF,qBAEE,CASA,YACA,yBAGA,CAEE,cAKN,CAPI,sBAOJ,gCAGE,qBAEA,WACA,aACA,sCAEA,mBACA,6BAGA,uEADA,qBACA,6BAIA,yBACA,qCAEE,UAEA,YACA,sBAEF,8BAGA,CAPE,aACA,WAMF,4BACE,sBACA,WAMJ,uBACE,cAYE,mBAXA,qDAKA,qCAGA,CAEA,YACA,CAHA,2BAEA,CACA,oCAEA,4CACA,uBAIA,oCAEJ,CAFI,cAIF,iBACE,CAHJ,kBAGI,yBAEA,oCAIA,qDAMF,mEAEA,CACE,8CAKA,gCAEA,qCAGA,oCAGE,sBACA,CAJF,WAEE,CAFF,eAEE,SAEA,mBACA,qCACE,aACA,CAFF,YADA,qBACA,WAEE,sBACA,kEAEN,2BAEE,iDAKA,uCAGF,CACE,0DAKA,kBACF,CAFE,sBAGA,mBACA,0BAEJ,yBAII,aADA,WACA,CAMF,UAFE,kBAEF,CAJF,gBACE,CAHE,iBAMF,6CC9ZF,yBACE,WACA,iBAEA,aAFA,iBAEA,6BAEA,kCACA,mBAKA,gCAGA,CARA,QAEA,CAGA,UALA,qBAEA,qDAGA,CALA,OAQA,4BACE,cAGF,2BACE,gCAEJ,C
AHE,UAGF,8CAGE,CAHF,UAGE,wCAGA,qBACA,CAFA,UAEA,6CAGA,yCAIA,sBAHA,UAGA,kCACE,OACA,CAFF,KAEE,cAQF,0CACE,CAFF,kBACA,CACE,wEACA,CARA,YACA,CAKF,mBAFF,OAII,eACA,CAJF,iCAJE,cAGJ,CANI,oBAEA,CAKF,SAIE,2BADA,UACA,kBAGF,sCACA,CAFF,WACE,WACA,qCACE,gCACA,2EACA,sDAKJ,aACE,mDAII,CAJJ,6CAII,kEACA,iBACE,iDACA,+CACE,aACA,WADA,+BACA,uEANN,YACE,mDAEE,mBADF,0CACE,CADF,qBACE,0DACA,YACE,4DACA,sEANN,YACE,8CACA,kBADA,UACA,2CACE,2EACA,cACE,kEACA,mEANN,yBACE,4DACA,sBACE,+EAEE,iEACA,qEANN,sCACE,CAGE,iBAHF,gBAGE,qBACE,CAJJ,uBACA,gDACE,wDACA,6DAHF,2CACA,CADA,gBACA,eACE,CAGE,sBANN,8BACE,CAII,iBAFF,4DACA,WACE,YADF,uCACE,6EACA,2BANN,8CACE,kDACA,0CACE,8BACA,yFACE,sBACA,sFALJ,mEACA,sBACE,kEACA,6EACE,uCACA,kEALJ,qGAEE,kEACA,6EACE,uCACA,kEALJ,8CACA,uDACE,sEACA,2EACE,sCACA,iEALJ,mGACA,qCACE,oDACA,0DACE,6GACA,gDAGR,yDCrEA,sEACE,CACA,6GACE,gEACF,iGAIF,wFACE,qDAGA,mGAEE,2CAEF,4FACE,gCACF,wGACE,8DAEE,6FAIA,iJAKN,6GACE,gDAKF,yDACA,qCAGA,6BACA,kBACA,qDAKA,oCAEA,+DAGA,2CAGE,oDAIA,oEAEE,qBAGJ,wDAEE,uCAEF,kEAGA,8CAEA,uDAKA,oCAEA,yDAEE,gEAKF,+CC5FA,0EAGE,CACA,qDCLJ,+DAIE,sCAIA,kEACE,yBACA,2FAMA,gBACA,yGCbF,mBAOA,2MAIA,4HAYA,0DACE,8GAYF,8HAQE,mBAEA,6HAOF,YAGA,mIAME,eACA,CAFF,YAEE,4FAMJ,8BAEE,uBAYA,sCAEE,CAJF,oBAEA,CARA,wCAEA,CAHA,8BACA,CAFA,eACA,CAGA,wCAEA,CAEA,mDAIE,kCACE,6BACA,4CAKJ,kDAIA,eACE,aAGF,8BACE,uDACA,sCACA,cAEA,+BACA,CAFA,eAEA,wCAEF,YACE,iBACA,mCACA,0DAGF,qBAEE,CAFF,kBAEE,+BAIA,yCAEE,qBADA,gBACA,yBAKF,eACA,CAFF,YACE,CACA,iBACA,qDAEA,mDCvIJ,2FAOE,iCACA,CAEA,eACA,CAHA,kBAEA,CAFA,wBAGA,8BACA,eACE,CAFF,YAEE,0BACA,8CAGA,oBACE,oCAGA,kBACE,8DAEA,iBAEN,UACE,8BAIJ,+CAEE,qDAEF,kDAIE,YAEF,CAFE,YAEF,CCjCE,mFAJA,QACA,UAIE,CADF,iBACE,mCAGA,iDACE,+BAGF,wBAEA,mBAKA,6CAEF,CAHE,mBACA,CAEF,kCAIE,CARA,kBACA,CAFF,eASE,YACA,mBAGF,CAJE,UAIF,wCCjCA,oBDmCE,wBCpCJ,uCACE,8BACA,4CACA,oBAGA,2CCAA,6CAGE,CAPF,uBAIA,CDGA,gDACE,6BCVJ,CAWM,2CAEF,CAJA,kCAEE,CDJF,aCLF,gBDKE,uBCMA,gCAGA,gDAGE,wBAGJ,0BAEA,iBACE,aACF,CADE,UACF,uBACE,aACF,oBACE,YACF,4BACE,6CAMA,CAYF,6DAZE,mCAGE,iCASJ,4BAGE,4DADA,+BACA,CAFA,qBAEA,yBACE,aAEF,wBAHA,SAGA,iHACE,2DAKF,CANA,yCACE,CADF,oCAMA,uSAIA,sGACE,oDChEJ,WAEF,yBACE,QACA,eAEA,gBAEE,uCAGA,CALF,iCAKE,uCAGA,0BACA,CACA,oBACA,iCClBJ,gBACE,KAGF,qBACE,YAGF,CAHE,cAGF,gCAEE,mBACA,iEAEA,oCACA,wCAEA,sBACA,WAEA,CAFA,YAEA,8EAEA,mCAFA,iBAEA,6BAIA,wEAKA,sDAIE,CARF,mDAIA,CAIE,cAEF,8CAIA,oBAFE,iBAEF,8CAGE,eAEF,CAFE,YAEF,OAEE,kBAGJ,CAJI,eACA,CAFF,mBAKF,yCCjDE,oBACA,CAFA,iBAEA,uCAKE,iBACA,qCAGA,mBCZJ,CDWI,gBCXJ,6BAEE,eACA,sBAGA,eAEA,sBACA,oDACA,iGAMA,gBAFE,YAEF,8FAME,iJClBF,YACA,gNAUE,6BAEF,oTAcI,kBACF,gHAIA,qBACE,eACF,qDACE,kBACF,6DACE,4BCxCJ,oBAEF,qCAEI,+CAGF,uBACE,uDAGJ,oBAkBE,mDAhBA,+CAaA,CAbA,oBAaA,0FAEE,CAFF,gGAbA,+BAaA,0BAGA,mQAIA,oNAEE,iBAGJ,CAHI,gBADA,gBAIJ,8CAYI,CAZJ,wCAYI,sVACE,iCAGA,uEAHA,QAGA,qXAKJ,iDAGF,CARM,+CACE,iDAIN,CALI,gBAQN,mHACE,gBAGF,2DACE,0EAOA,0EAKA,6EC/EA,iDACA,gCACA,oDAGA,qBACA,oDCFA,cACA,eAEA,yBAGF,sBAEE,iBACA,sNAWA,iBACE,kBACA,wRAgBA,kBAEA,iOAgBA,uCACE,uEAEA,kBAEF,qUAuBE,iDAIJ,CACA,geCxFF,4BAEE,CAQA,6JACA,iDAIA,sEAGA,mDAOF,iDAGE,4DAIA,8CACA,qDAEE,eAFF,cAEE,oBAEF,uBAFE,kCAGA,eACA,iBACA,mBAIA,mDACA,CAHA,uCAEA,CAJA,0CACA,CAIA,gBAJA,gBACA,oBADA,gBAIA,wBAEJ,gBAGE,6BACA,YAHA,iBAGA,gCACA,iEAEA,6CACA,sDACA,0BADA,wBACA,0BACA,oIAIA,mBAFA,YAEA,qBACA,0CAIE,uBAEF,CAHA,yBACE,CAEF,iDACE,mFAKJ,oCACE,CANE,aAKJ,CACE,qEAIA,YAFA,WAEA,CAHA,aACA,CAEA,gBACE,4BACA,sBADA,aACA,gCAMF,oCACA,yDACA,2CAEA,qBAGE,kBAEA,CACA,mCAIF,CARE,YACA,CAOF,iCAEE,CAPA,oBACA,CAQA,oBACE,uDAEJ,sDAGA,CAHA,cAGA,0BACE,oDAIA,oCACA,4BACA,sBAGA,cAEA,oFAGA,sBAEA,yDACE,CAIA,iBAJA,wBAIA,6CAJA,6CAOA,4BAGJ,CAHI,cAGJ,yCAGA,kBACE,CAIA,iDAEA,CATA,YAEF,CACE,4CAGA,kBAIA,wEAEA,wDAIF,kCAOE,i
DACA,CARF,WAIE,sCAGA,CANA,2CACA,CAMA,oEARF,iBACE,CACA,qCAMA,iBAuBE,uBAlBF,YAKA,2DALA,uDAKA,CALA,sBAiBA,4CACE,CALA,gRAIF,YACE,UAEN,uBACE,YACA,mCAOE,+CAGA,8BAGF,+CAGA,4BCjNA,SDiNA,qFCjNA,gDAGA,sCACA,qCACA,sDAIF,CAIE,kDAGA,CAPF,0CAOE,kBAEA,kDAEA,CAHA,eACA,CAFA,YACA,CADA,SAIA,mHAIE,CAGA,6CAFA,oCAeE,CAbF,yBACE,qBAEJ,CAGE,oBACA,CAEA,YAFA,2CACF,CACE,uBAEA,mFAEE,CALJ,oBACE,CAEA,UAEE,gCAGF,sDAEA,yCC7CJ,oCAGA,CD6CE,yXAQE,sCCrDJ,wCAGA,oCACE","sources":["webpack:///./node_modules/normalize.css/normalize.css","webpack:///./src/furo/assets/styles/base/_print.sass","webpack:///./src/furo/assets/styles/base/_screen-readers.sass","webpack:///./src/furo/assets/styles/base/_theme.sass","webpack:///./src/furo/assets/styles/variables/_fonts.scss","webpack:///./src/furo/assets/styles/variables/_spacing.scss","webpack:///./src/furo/assets/styles/variables/_icons.scss","webpack:///./src/furo/assets/styles/variables/_admonitions.scss","webpack:///./src/furo/assets/styles/variables/_colors.scss","webpack:///./src/furo/assets/styles/base/_typography.sass","webpack:///./src/furo/assets/styles/_scaffold.sass","webpack:///./src/furo/assets/styles/content/_admonitions.sass","webpack:///./src/furo/assets/styles/content/_api.sass","webpack:///./src/furo/assets/styles/content/_blocks.sass","webpack:///./src/furo/assets/styles/content/_captions.sass","webpack:///./src/furo/assets/styles/content/_code.sass","webpack:///./src/furo/assets/styles/content/_footnotes.sass","webpack:///./src/furo/assets/styles/content/_images.sass","webpack:///./src/furo/assets/styles/content/_indexes.sass","webpack:///./src/furo/assets/styles/content/_lists.sass","webpack:///./src/furo/assets/styles/content/_math.sass","webpack:///./src/furo/assets/styles/content/_misc.sass","webpack:///./src/furo/assets/styles/content/_rubrics.sass","webpack:///./src/furo/assets/styles/content/_sidebar.sass","webpack:///./src/furo/assets/styles/content/_tables.sass","webpack:///./src/furo/assets/styles/content/_target.sass","webpack:///./src/furo/assets/styles/content/_gui-labels.sass","webpack:///./src/furo/assets/styles/components/_footer.sass","webpack:///./src/furo/assets/styles/components/_sidebar.sass","webpack:///./src/furo/assets/styles/components/_table_of_contents.sass","webpack:///./src/furo/assets/styles/_shame.sass"],"sourcesContent":["/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */\n\n/* Document\n ========================================================================== */\n\n/**\n * 1. Correct the line height in all browsers.\n * 2. Prevent adjustments of font size after orientation changes in iOS.\n */\n\nhtml {\n line-height: 1.15; /* 1 */\n -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/* Sections\n ========================================================================== */\n\n/**\n * Remove the margin in all browsers.\n */\n\nbody {\n margin: 0;\n}\n\n/**\n * Render the `main` element consistently in IE.\n */\n\nmain {\n display: block;\n}\n\n/**\n * Correct the font size and margin on `h1` elements within `section` and\n * `article` contexts in Chrome, Firefox, and Safari.\n */\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n/* Grouping content\n ========================================================================== */\n\n/**\n * 1. Add the correct box sizing in Firefox.\n * 2. Show the overflow in Edge and IE.\n */\n\nhr {\n box-sizing: content-box; /* 1 */\n height: 0; /* 1 */\n overflow: visible; /* 2 */\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. 
Correct the odd `em` font sizing in all browsers.\n */\n\npre {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/* Text-level semantics\n ========================================================================== */\n\n/**\n * Remove the gray background on active links in IE 10.\n */\n\na {\n background-color: transparent;\n}\n\n/**\n * 1. Remove the bottom border in Chrome 57-\n * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n */\n\nabbr[title] {\n border-bottom: none; /* 1 */\n text-decoration: underline; /* 2 */\n text-decoration: underline dotted; /* 2 */\n}\n\n/**\n * Add the correct font weight in Chrome, Edge, and Safari.\n */\n\nb,\nstrong {\n font-weight: bolder;\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\ncode,\nkbd,\nsamp {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/**\n * Add the correct font size in all browsers.\n */\n\nsmall {\n font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` elements from affecting the line height in\n * all browsers.\n */\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -0.25em;\n}\n\nsup {\n top: -0.5em;\n}\n\n/* Embedded content\n ========================================================================== */\n\n/**\n * Remove the border on images inside links in IE 10.\n */\n\nimg {\n border-style: none;\n}\n\n/* Forms\n ========================================================================== */\n\n/**\n * 1. Change the font styles in all browsers.\n * 2. Remove the margin in Firefox and Safari.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n font-family: inherit; /* 1 */\n font-size: 100%; /* 1 */\n line-height: 1.15; /* 1 */\n margin: 0; /* 2 */\n}\n\n/**\n * Show the overflow in IE.\n * 1. Show the overflow in Edge.\n */\n\nbutton,\ninput { /* 1 */\n overflow: visible;\n}\n\n/**\n * Remove the inheritance of text transform in Edge, Firefox, and IE.\n * 1. Remove the inheritance of text transform in Firefox.\n */\n\nbutton,\nselect { /* 1 */\n text-transform: none;\n}\n\n/**\n * Correct the inability to style clickable types in iOS and Safari.\n */\n\nbutton,\n[type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\n/**\n * Remove the inner border and padding in Firefox.\n */\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n border-style: none;\n padding: 0;\n}\n\n/**\n * Restore the focus styles unset by the previous rule.\n */\n\nbutton:-moz-focusring,\n[type=\"button\"]:-moz-focusring,\n[type=\"reset\"]:-moz-focusring,\n[type=\"submit\"]:-moz-focusring {\n outline: 1px dotted ButtonText;\n}\n\n/**\n * Correct the padding in Firefox.\n */\n\nfieldset {\n padding: 0.35em 0.75em 0.625em;\n}\n\n/**\n * 1. Correct the text wrapping in Edge and IE.\n * 2. Correct the color inheritance from `fieldset` elements in IE.\n * 3. 
Remove the padding so developers are not caught out when they zero out\n * `fieldset` elements in all browsers.\n */\n\nlegend {\n box-sizing: border-box; /* 1 */\n color: inherit; /* 2 */\n display: table; /* 1 */\n max-width: 100%; /* 1 */\n padding: 0; /* 3 */\n white-space: normal; /* 1 */\n}\n\n/**\n * Add the correct vertical alignment in Chrome, Firefox, and Opera.\n */\n\nprogress {\n vertical-align: baseline;\n}\n\n/**\n * Remove the default vertical scrollbar in IE 10+.\n */\n\ntextarea {\n overflow: auto;\n}\n\n/**\n * 1. Add the correct box sizing in IE 10.\n * 2. Remove the padding in IE 10.\n */\n\n[type=\"checkbox\"],\n[type=\"radio\"] {\n box-sizing: border-box; /* 1 */\n padding: 0; /* 2 */\n}\n\n/**\n * Correct the cursor style of increment and decrement buttons in Chrome.\n */\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n/**\n * 1. Correct the odd appearance in Chrome and Safari.\n * 2. Correct the outline style in Safari.\n */\n\n[type=\"search\"] {\n -webkit-appearance: textfield; /* 1 */\n outline-offset: -2px; /* 2 */\n}\n\n/**\n * Remove the inner padding in Chrome and Safari on macOS.\n */\n\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n/**\n * 1. Correct the inability to style clickable types in iOS and Safari.\n * 2. Change font properties to `inherit` in Safari.\n */\n\n::-webkit-file-upload-button {\n -webkit-appearance: button; /* 1 */\n font: inherit; /* 2 */\n}\n\n/* Interactive\n ========================================================================== */\n\n/*\n * Add the correct display in Edge, IE 10+, and Firefox.\n */\n\ndetails {\n display: block;\n}\n\n/*\n * Add the correct display in all browsers.\n */\n\nsummary {\n display: list-item;\n}\n\n/* Misc\n ========================================================================== */\n\n/**\n * Add the correct display in IE 10+.\n */\n\ntemplate {\n display: none;\n}\n\n/**\n * Add the correct display in IE 10.\n */\n\n[hidden] {\n display: none;\n}\n","// This file contains styles for managing print media.\n\n////////////////////////////////////////////////////////////////////////////////\n// Hide elements not relevant to print media.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Hide icon container.\n .content-icon-container\n display: none !important\n\n // Hide showing header links if hovering over when printing.\n .headerlink\n display: none !important\n\n // Hide mobile header.\n .mobile-header\n display: none !important\n\n // Hide navigation links.\n .related-pages\n display: none !important\n\n////////////////////////////////////////////////////////////////////////////////\n// Tweaks related to decolorization.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Apply a border around code which no longer have a color background.\n .highlight\n border: 0.1pt solid var(--color-foreground-border)\n\n////////////////////////////////////////////////////////////////////////////////\n// Avoid page break in some relevant cases.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n ul, ol, dl, a, table, pre, blockquote\n page-break-inside: avoid\n\n h1, h2, h3, h4, h5, h6, img, figure, caption\n page-break-inside: avoid\n page-break-after: avoid\n\n ul, ol, dl\n page-break-before: avoid\n",".visually-hidden\n position: absolute !important\n width: 
1px !important\n height: 1px !important\n padding: 0 !important\n margin: -1px !important\n overflow: hidden !important\n clip: rect(0,0,0,0) !important\n white-space: nowrap !important\n border: 0 !important\n\n:-moz-focusring\n outline: auto\n","// This file serves as the \"skeleton\" of the theming logic.\n//\n// This contains the bulk of the logic for handling dark mode, color scheme\n// toggling and the handling of color-scheme-specific hiding of elements.\n\nbody\n @include fonts\n @include spacing\n @include icons\n @include admonitions\n @include default-admonition(#651fff, \"abstract\")\n @include default-topic(#14B8A6, \"pencil\")\n\n @include colors\n\n.only-light\n display: block !important\nhtml body .only-dark\n display: none !important\n\n// Ignore dark-mode hints if print media.\n@media not print\n // Enable dark-mode, if requested.\n body[data-theme=\"dark\"]\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n // Enable dark mode, unless explicitly told to avoid.\n @media (prefers-color-scheme: dark)\n body:not([data-theme=\"light\"])\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n//\n// Theme toggle presentation\n//\nbody[data-theme=\"auto\"]\n .theme-toggle svg.theme-icon-when-auto\n display: block\n\nbody[data-theme=\"dark\"]\n .theme-toggle svg.theme-icon-when-dark\n display: block\n\nbody[data-theme=\"light\"]\n .theme-toggle svg.theme-icon-when-light\n display: block\n","// Fonts used by this theme.\n//\n// There are basically two things here -- using the system font stack and\n// defining sizes for various elements in %ages. We could have also used `em`\n// but %age is easier to reason about for me.\n\n@mixin fonts {\n // These are adapted from https://systemfontstack.com/\n --font-stack: -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial,\n sans-serif, Apple Color Emoji, Segoe UI Emoji;\n --font-stack--monospace: \"SFMono-Regular\", Menlo, Consolas, Monaco,\n Liberation Mono, Lucida Console, monospace;\n\n --font-size--normal: 100%;\n --font-size--small: 87.5%;\n --font-size--small--2: 81.25%;\n --font-size--small--3: 75%;\n --font-size--small--4: 62.5%;\n\n // Sidebar\n --sidebar-caption-font-size: var(--font-size--small--2);\n --sidebar-item-font-size: var(--font-size--small);\n --sidebar-search-input-font-size: var(--font-size--small);\n\n // Table of Contents\n --toc-font-size: var(--font-size--small--3);\n --toc-font-size--mobile: var(--font-size--normal);\n --toc-title-font-size: var(--font-size--small--4);\n\n // Admonitions\n //\n // These aren't defined in terms of %ages, since nesting these is permitted.\n --admonition-font-size: 0.8125rem;\n --admonition-title-font-size: 0.8125rem;\n\n // Code\n --code-font-size: var(--font-size--small--2);\n\n // API\n --api-font-size: var(--font-size--small);\n}\n","// Spacing for various elements on the page\n//\n// If the user wants to tweak things in a certain way, they are permitted to.\n// They also have to deal with the consequences though!\n\n@mixin spacing {\n // Header!\n --header-height: calc(\n var(--sidebar-item-line-height) + 4 * #{var(--sidebar-item-spacing-vertical)}\n );\n --header-padding: 0.5rem;\n\n // Sidebar\n --sidebar-tree-space-above: 1.5rem;\n --sidebar-caption-space-above: 1rem;\n\n --sidebar-item-line-height: 1rem;\n --sidebar-item-spacing-vertical: 0.5rem;\n --sidebar-item-spacing-horizontal: 1rem;\n --sidebar-item-height: calc(\n 
var(--sidebar-item-line-height) + 2 *#{var(--sidebar-item-spacing-vertical)}\n );\n\n --sidebar-expander-width: var(--sidebar-item-height); // be square\n\n --sidebar-search-space-above: 0.5rem;\n --sidebar-search-input-spacing-vertical: 0.5rem;\n --sidebar-search-input-spacing-horizontal: 0.5rem;\n --sidebar-search-input-height: 1rem;\n --sidebar-search-icon-size: var(--sidebar-search-input-height);\n\n // Table of Contents\n --toc-title-padding: 0.25rem 0;\n --toc-spacing-vertical: 1.5rem;\n --toc-spacing-horizontal: 1.5rem;\n --toc-item-spacing-vertical: 0.4rem;\n --toc-item-spacing-horizontal: 1rem;\n}\n","// Expose theme icons as CSS variables.\n\n$icons: (\n // Adapted from tabler-icons\n // url: https://tablericons.com/\n \"search\":\n url('data:image/svg+xml;charset=utf-8,'),\n // Factored out from mkdocs-material on 24-Aug-2020.\n // url: https://squidfunk.github.io/mkdocs-material/reference/admonitions/\n \"pencil\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"abstract\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"info\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"flame\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"question\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"warning\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"failure\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"spark\":\n url('data:image/svg+xml;charset=utf-8,')\n);\n\n@mixin icons {\n @each $name, $glyph in $icons {\n --icon-#{$name}: #{$glyph};\n }\n}\n","// Admonitions\n\n// Structure of these is:\n// admonition-class: color \"icon-name\";\n//\n// The colors are translated into CSS variables below. The icons are\n// used directly in the main declarations to set the `mask-image` in\n// the title.\n\n// prettier-ignore\n$admonitions: (\n // Each of these has an reST directives for it.\n \"caution\": #ff9100 \"spark\",\n \"warning\": #ff9100 \"warning\",\n \"danger\": #ff5252 \"spark\",\n \"attention\": #ff5252 \"warning\",\n \"error\": #ff5252 \"failure\",\n \"hint\": #00c852 \"question\",\n \"tip\": #00c852 \"info\",\n \"important\": #00bfa5 \"flame\",\n \"note\": #00b0ff \"pencil\",\n \"seealso\": #448aff \"info\",\n \"admonition-todo\": #808080 \"pencil\"\n);\n\n@mixin default-admonition($color, $icon-name) {\n --color-admonition-title: #{$color};\n --color-admonition-title-background: #{rgba($color, 0.2)};\n\n --icon-admonition-default: var(--icon-#{$icon-name});\n}\n\n@mixin default-topic($color, $icon-name) {\n --color-topic-title: #{$color};\n --color-topic-title-background: #{rgba($color, 0.2)};\n\n --icon-topic-default: var(--icon-#{$icon-name});\n}\n\n@mixin admonitions {\n @each $name, $values in $admonitions {\n --color-admonition-title--#{$name}: #{nth($values, 1)};\n --color-admonition-title-background--#{$name}: #{rgba(\n nth($values, 1),\n 0.2\n )};\n }\n}\n","// Colors used throughout this theme.\n//\n// The aim is to give the user more control. 
Thus, instead of hard-coding colors\n// in various parts of the stylesheet, the approach taken is to define all\n// colors as CSS variables and reusing them in all the places.\n//\n// `colors-dark` depends on `colors` being included at a lower specificity.\n\n@mixin colors {\n --color-problematic: #b30000;\n\n // Base Colors\n --color-foreground-primary: black; // for main text and headings\n --color-foreground-secondary: #5a5c63; // for secondary text\n --color-foreground-muted: #646776; // for muted text\n --color-foreground-border: #878787; // for content borders\n\n --color-background-primary: white; // for content\n --color-background-secondary: #f8f9fb; // for navigation + ToC\n --color-background-hover: #efeff4ff; // for navigation-item hover\n --color-background-hover--transparent: #efeff400;\n --color-background-border: #eeebee; // for UI borders\n --color-background-item: #ccc; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #2962ff;\n --color-brand-content: #2a5adf;\n\n // API documentation\n --color-api-background: var(--color-background-hover--transparent);\n --color-api-background-hover: var(--color-background-hover);\n --color-api-overall: var(--color-foreground-secondary);\n --color-api-name: var(--color-problematic);\n --color-api-pre-name: var(--color-problematic);\n --color-api-paren: var(--color-foreground-secondary);\n --color-api-keyword: var(--color-foreground-primary);\n --color-highlight-on-target: #ffffcc;\n\n // Inline code background\n --color-inline-code-background: var(--color-background-secondary);\n\n // Highlighted text (search)\n --color-highlighted-background: #ddeeff;\n --color-highlighted-text: var(--color-foreground-primary);\n\n // GUI Labels\n --color-guilabel-background: #ddeeff80;\n --color-guilabel-border: #bedaf580;\n --color-guilabel-text: var(--color-foreground-primary);\n\n // Admonitions!\n --color-admonition-background: transparent;\n\n //////////////////////////////////////////////////////////////////////////////\n // Everything below this should be one of:\n // - var(...)\n // - *-gradient(...)\n // - special literal values (eg: transparent, none)\n //////////////////////////////////////////////////////////////////////////////\n\n // Tables\n --color-table-header-background: var(--color-background-secondary);\n --color-table-border: var(--color-background-border);\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: transparent;\n --color-card-marginals-background: var(--color-background-secondary);\n\n // Header\n --color-header-background: var(--color-background-primary);\n --color-header-border: var(--color-background-border);\n --color-header-text: var(--color-foreground-primary);\n\n // Sidebar (left)\n --color-sidebar-background: var(--color-background-secondary);\n --color-sidebar-background-border: var(--color-background-border);\n\n --color-sidebar-brand-text: var(--color-foreground-primary);\n --color-sidebar-caption-text: var(--color-foreground-muted);\n --color-sidebar-link-text: var(--color-foreground-secondary);\n --color-sidebar-link-text--top-level: var(--color-brand-primary);\n\n --color-sidebar-item-background: var(--color-sidebar-background);\n --color-sidebar-item-background--current: var(\n --color-sidebar-item-background\n );\n --color-sidebar-item-background--hover: linear-gradient(\n 90deg,\n 
[generated build asset elided: escaped SCSS sources for the Furo Sphinx theme (github.com/pradyunsg/furo), embedded as string data in a CSS source map emitted by the Sphinx documentation build]