From 726caf59c5ebab268c58adf460b0a3defbb78934 Mon Sep 17 00:00:00 2001 From: CN_ChiTu <36254426+CNChTu@users.noreply.github.com> Date: Thu, 7 Mar 2024 03:55:09 +0800 Subject: [PATCH] v0.0.4 --- README.md | 18 ++++++++++++++++++ setup.py | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 02a7cae..62c1c08 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,17 @@ audio = torch.from_numpy(audio).float().unsqueeze(0).unsqueeze(-1).to(device) model = spawn_bundled_infer_model(device=device) # infer +''' +audio: wav, torch.Tensor +sr: sample rate +decoder_mode: [Optional] 'local_argmax' is recommended +threshold: [Optional] threshold for V/UV decision, 0.006 is recommended +f0_min: [Optional] minimum f0 +f0_max: [Optional] maximum f0 +interp_uv: [Optional] whether to interpolate unvoiced frames +output_interp_target_length: [Optional] If not None, the output f0 will be + interpolated to the target length +''' f0 = model.infer( audio, sr=sr, @@ -39,5 +50,12 @@ f0 = model.infer( ) print(f0) + +# the model is a subclass of torch.nn.Module, so you can use it as a normal pytorch model +# example: change device +model = model.to(device) +# example: compile model +model = torch.compile(model) + ``` diff --git a/setup.py b/setup.py index 62e3b4a..2ee9f59 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ setup( name='torchfcpe', description='The official Pytorch implementation of Fast Context-based Pitch Estimation (FCPE)', - version='0.0.3', + version='0.0.4', author='CNChTu', author_email='2921046558@qq.com', url='https://github.com/CNChTu/FCPE',