
Support new versions of tensorflow and numpy
hankcs committed Aug 22, 2024
1 parent 32ed29b commit e64efb8
Showing 4 changed files with 15 additions and 8 deletions.
11 changes: 6 additions & 5 deletions hanlp/components/parsers/biaffine_parser_tf.py
@@ -200,11 +200,12 @@ def build_optimizer(self, optimizer='adam', lr=2e-3, mu=.9, nu=.9, epsilon=1e-12
             scheduler = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=lr,
                                                                         decay_steps=decay_steps,
                                                                         decay_rate=decay)
-            optimizer = tf.keras.optimizers.Adam(learning_rate=scheduler,
-                                                 beta_1=mu,
-                                                 beta_2=nu,
-                                                 epsilon=epsilon,
-                                                 clipnorm=clip)
+            from hanlp.optimizers.adamw.optimization import AdamTF
+            optimizer = AdamTF(learning_rate=scheduler,
+                               beta_1=mu,
+                               beta_2=nu,
+                               epsilon=epsilon,
+                               clipnorm=clip)
             return optimizer
         return super().build_optimizer(optimizer, **kwargs)
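
For context, the pattern above wires an ExponentialDecay schedule into the optimizer and relies on AdamTF (introduced in this commit) to pick a compatible Adam class. A minimal standalone sketch, assuming TensorFlow 2.x; the decay_steps, decay_rate and clipnorm values below are placeholders, not HanLP's defaults:

import tensorflow as tf

# Learning-rate schedule matching the shape of the one built above.
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=2e-3,   # lr
    decay_steps=5000,             # placeholder
    decay_rate=0.75)              # placeholder

# Same fallback idea as AdamTF: prefer the legacy Adam where it exists (TF >= 2.11).
try:
    Adam = tf.keras.optimizers.legacy.Adam
except AttributeError:
    Adam = tf.keras.optimizers.Adam

optimizer = Adam(learning_rate=scheduler,
                 beta_1=0.9,      # mu
                 beta_2=0.9,      # nu
                 epsilon=1e-12,
                 clipnorm=5.0)    # placeholder for clip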

8 changes: 7 additions & 1 deletion hanlp/optimizers/adamw/optimization.py
@@ -95,7 +95,13 @@ def create_optimizer(init_lr, num_train_steps, num_warmup_steps):
     return optimizer
 
 
-class AdamWeightDecay(tf.keras.optimizers.Adam):
+try:
+    AdamTF = tf.keras.optimizers.legacy.Adam  # avoid slowdown when using v2.11+ Keras optimizers on M1/M2 Macs
+except:
+    AdamTF = tf.keras.optimizers.Adam
+
+
+class AdamWeightDecay(AdamTF):
     """Adam enables L2 weight decay and clip_by_global_norm on gradients.
     Just adding the square of the weights to the loss function is *not* the
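
The try/except above makes AdamTF resolve to tf.keras.optimizers.legacy.Adam on TF 2.11+ (where the new Keras optimizers can be markedly slower, e.g. on M1/M2 Macs) and fall back to tf.keras.optimizers.Adam on older releases that have no legacy namespace. A quick sanity-check sketch, assuming the missing namespace surfaces as AttributeError (the commit's bare except also covers any other lookup error):

import tensorflow as tf

try:
    AdamTF = tf.keras.optimizers.legacy.Adam   # present on TF >= 2.11
except AttributeError:
    AdamTF = tf.keras.optimizers.Adam          # older TF: no legacy namespace

# Shows which class was picked on the installed TensorFlow.
print(tf.__version__, AdamTF.__module__, AdamTF.__name__)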
2 changes: 1 addition & 1 deletion hanlp/transform/conll_tf.py
@@ -736,7 +736,7 @@ def batched_inputs_to_batches(self, corpus, indices, shuffle):
         raw_batch = [[], [], [], []] if use_pos else [[], [], []]
         max_len = len(max([corpus[i] for i in indices], key=len))
         for idx in indices:
-            arc = np.zeros((max_len, max_len), dtype=np.bool)
+            arc = np.zeros((max_len, max_len), dtype=bool)
             rel = np.zeros((max_len, max_len), dtype=np.int64)
             for b in raw_batch[:2]:
                 b.append([])
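
The dtype change is needed because np.bool was only an alias for the builtin bool; it was deprecated in NumPy 1.20 and removed in 1.24, so building the arc matrix with it raises AttributeError on current NumPy. A minimal illustration (max_len is a placeholder):

import numpy as np

max_len = 4  # placeholder sentence length
arc = np.zeros((max_len, max_len), dtype=bool)       # works on old and new NumPy
# arc = np.zeros((max_len, max_len), dtype=np.bool)  # AttributeError on NumPy >= 1.24
print(arc.dtype)  # bool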
2 changes: 1 addition & 1 deletion hanlp/version.py
@@ -2,7 +2,7 @@
 # Author: hankcs
 # Date: 2019-12-28 19:26
 
-__version__ = '2.1.0-beta.59'
+__version__ = '2.1.0-beta.60'
 """HanLP version"""

