This repository has been archived by the owner on Feb 22, 2020. It is now read-only.

Commit 5de329d

Merge pull request #351 from gnes-ai/feat-standardscaler
feat(standarder): add standard scaler
mergify[bot] authored Oct 23, 2019
2 parents 44a54be + 5d95c74 commit 5de329d
Showing 2 changed files with 55 additions and 2 deletions.
12 changes: 10 additions & 2 deletions gnes/encoder/numeric/pca.py
@@ -23,16 +23,19 @@
 class PCAEncoder(BaseNumericEncoder):
     batch_size = 2048
 
-    def __init__(self, output_dim: int, *args, **kwargs):
+    def __init__(self, output_dim: int, whiten: bool=False, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.output_dim = output_dim
+        self.whiten = whiten
         self.pca_components = None
         self.mean = None
 
+
     def post_init(self):
         from sklearn.decomposition import IncrementalPCA
         self.pca = IncrementalPCA(n_components=self.output_dim)
 
+
     @batching
     def train(self, vecs: np.ndarray, *args, **kwargs) -> None:
         num_samples, num_dim = vecs.shape
@@ -49,11 +52,16 @@ def train(self, vecs: np.ndarray, *args, **kwargs) -> None:

         self.pca_components = np.transpose(self.pca.components_)
         self.mean = self.pca.mean_.astype('float32')
+        self.explained_variance = self.pca.explained_variance_.astype('float32')
 
+
     @train_required
     @batching
     def encode(self, vecs: np.ndarray, *args, **kwargs) -> np.ndarray:
-        return np.matmul(vecs - self.mean, self.pca_components)
+        X_transformed = np.matmul(vecs - self.mean, self.pca_components)
+        if self.whiten:
+            X_transformed /= np.sqrt(self.explained_variance)
+        return X_transformed
 
 
 class PCALocalEncoder(BaseNumericEncoder):
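For readers skimming the diff: the new whiten branch in encode divides each projected component by the square root of its explained variance, so whitened outputs have roughly unit variance per component. Below is a minimal standalone sketch (not part of this commit) of that arithmetic using scikit-learn's IncrementalPCA directly; the toy data and shapes are illustrative assumptions.

# Standalone sketch (illustrative, not from the repository): the math behind whiten=True.
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(1000, 64).astype('float32')   # toy data, assumed shape

pca = IncrementalPCA(n_components=8)
pca.fit(X)

components = np.transpose(pca.components_)                        # what PCAEncoder stores as pca_components
mean = pca.mean_.astype('float32')                                # stored as self.mean
explained_variance = pca.explained_variance_.astype('float32')    # stored as self.explained_variance

projected = np.matmul(X - mean, components)          # plain PCA projection (whiten=False)
whitened = projected / np.sqrt(explained_variance)   # whitened projection (whiten=True)

print(whitened.var(axis=0))   # each whitened component has variance close to 1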
45 changes: 45 additions & 0 deletions gnes/encoder/numeric/standarder.py
@@ -0,0 +1,45 @@
# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import numpy as np

from ..base import BaseNumericEncoder
from ...helper import batching, train_required


class StandarderEncoder(BaseNumericEncoder):
    batch_size = 2048

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mean = None
        self.scale = None

    def post_init(self):
        from sklearn.preprocessing import StandardScaler
        self.standarder = StandardScaler()

    @batching
    def train(self, vecs: np.ndarray, *args, **kwargs) -> None:
        self.standarder.partial_fit(vecs)

        self.mean = self.standarder.mean_.astype('float32')
        self.scale = self.standarder.scale_.astype('float32')

    @train_required
    @batching
    def encode(self, vecs: np.ndarray, *args, **kwargs) -> np.ndarray:
        return (vecs - self.mean) / self.scale
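
For context, the encode step above reproduces what StandardScaler.transform does with its default settings, i.e. (vecs - mean_) / scale_. A minimal standalone check follows (not part of this commit; the toy data and chunk count are illustrative assumptions).

# Standalone sketch (illustrative, not from the repository): encode() vs. StandardScaler.transform.
import numpy as np
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(500, 32).astype('float32')   # toy data, assumed shape

scaler = StandardScaler()
for chunk in np.array_split(X, 4):         # mimic the batched partial_fit done in train()
    scaler.partial_fit(chunk)

mean = scaler.mean_.astype('float32')      # stored as self.mean
scale = scaler.scale_.astype('float32')    # stored as self.scale

manual = (X - mean) / scale                # what StandarderEncoder.encode computes
reference = scaler.transform(X)            # scikit-learn's own transform

print(np.abs(manual - reference).max())    # tiny, differs only by float32 rounding of the statistics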
