Commit

fix coverage
HydrogenSulfate committed Nov 29, 2024
1 parent 3d70e7c commit 8b5b4a8
Showing 4 changed files with 55 additions and 92 deletions.
18 changes: 13 additions & 5 deletions deepmd/pd/entrypoints/main.py
@@ -230,7 +230,7 @@ def train(
use_pretrain_script: bool = False,
force_load: bool = False,
output: str = "out.json",
):
) -> None:
log.info("Configuration path: %s", input_file)
SummaryPrinter()()
with open(input_file) as fin:
@@ -321,18 +321,26 @@ def train(
# save min_nbor_dist
if min_nbor_dist is not None:
if not multi_task:
trainer.model.min_nbor_dist = min_nbor_dist
trainer.model.min_nbor_dist = paddle.to_tensor(
min_nbor_dist,
dtype=paddle.float64,
place=DEVICE,
)
else:
for model_item in min_nbor_dist:
trainer.model[model_item].min_nbor_dist = min_nbor_dist[model_item]
trainer.model[model_item].min_nbor_dist = paddle.to_tensor(
min_nbor_dist[model_item],
dtype=paddle.float64,
place=DEVICE,
)
trainer.run()


def freeze(
model: str,
output: str = "frozen_model.json",
head: Optional[str] = None,
):
) -> None:
paddle.set_flags(
{
"FLAGS_save_cf_stack_op": 1,
@@ -383,7 +391,7 @@ def change_bias(
numb_batch: int = 0,
model_branch: Optional[str] = None,
output: Optional[str] = None,
):
) -> None:
if input_file.endswith(".pd"):
old_state_dict = paddle.load(input_file)
model_state_dict = copy.deepcopy(old_state_dict.get("model", old_state_dict))
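The main.py hunks above add "-> None" return annotations to the train, freeze, and change_bias entry points and store min_nbor_dist as a float64 Paddle tensor on the target device rather than a bare Python float. A minimal sketch of that conversion, assuming DEVICE is a Paddle place string such as "cpu" (in the real module it comes from deepmd.pd.utils.env):

import paddle

DEVICE = "cpu"  # assumption for this sketch; deepmd.pd.utils.env.DEVICE selects the real device

min_nbor_dist = 0.9  # hypothetical value; normally computed from the training data
# Store the scalar as a float64 tensor on the target device, mirroring the hunk above.
min_nbor_dist_tensor = paddle.to_tensor(min_nbor_dist, dtype=paddle.float64, place=DEVICE)
print(min_nbor_dist_tensor.dtype)  # expected: paddle.float64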
60 changes: 39 additions & 21 deletions deepmd/pd/train/training.py
@@ -50,7 +50,7 @@
)
from deepmd.pd.utils.dataloader import (
BufferedIterator,
get_weighted_sampler,
get_sampler_from_params,
)
from deepmd.pd.utils.env import (
DEVICE,
@@ -168,19 +168,7 @@ def get_opt_param(params):

def get_data_loader(_training_data, _validation_data, _training_params):
def get_dataloader_and_buffer(_data, _params):
if "auto_prob" in _training_params["training_data"]:
_sampler = get_weighted_sampler(
_data, _params["training_data"]["auto_prob"]
)
elif "sys_probs" in _training_params["training_data"]:
_sampler = get_weighted_sampler(
_data,
_params["training_data"]["sys_probs"],
sys_prob=True,
)
else:
_sampler = get_weighted_sampler(_data, "prob_sys_size")

_sampler = get_sampler_from_params(_data, _params)
if _sampler is None:
log.warning(
"Sampler not specified!"
@@ -201,14 +189,16 @@ def get_dataloader_and_buffer(_data, _params):
return _dataloader, _data_buffered

training_dataloader, training_data_buffered = get_dataloader_and_buffer(
_training_data, _training_params
_training_data, _training_params["training_data"]
)

if _validation_data is not None:
(
validation_dataloader,
validation_data_buffered,
) = get_dataloader_and_buffer(_validation_data, _training_params)
) = get_dataloader_and_buffer(
_validation_data, _training_params["validation_data"]
)
valid_numb_batch = _training_params["validation_data"].get(
"numb_btch", 1
)
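The refactor above folds the auto_prob / sys_probs branching into a single get_sampler_from_params call and passes the per-dataset sub-config ("training_data" or "validation_data") into get_dataloader_and_buffer. A plausible sketch of the consolidated helper, inferred from the branching removed here; the actual implementation lives in deepmd/pd/utils/dataloader.py and may differ:

from deepmd.pd.utils.dataloader import get_weighted_sampler


def get_sampler_from_params(_data, _params):
    # Mirror the branching removed from training.py: an explicit auto_prob wins,
    # then sys_probs, otherwise fall back to the "prob_sys_size" policy.
    if "auto_prob" in _params:
        return get_weighted_sampler(_data, _params["auto_prob"])
    if "sys_probs" in _params:
        return get_weighted_sampler(_data, _params["sys_probs"], sys_prob=True)
    return get_weighted_sampler(_data, "prob_sys_size")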
@@ -283,7 +273,7 @@ def get_lr(lr_params):
self.opt_type, self.opt_param = get_opt_param(training_params)

# Model
self.model = get_model_for_wrapper(model_params)
self.model = get_model_for_wrapper(model_params, resuming=resuming)

# Loss
if not self.multi_task:
@@ -495,7 +485,7 @@ def collect_single_finetune_params(
_new_state_dict,
_origin_state_dict,
_random_state_dict,
):
) -> None:
_new_fitting = _finetune_rule_single.get_random_fitting()
_model_key_from = _finetune_rule_single.get_model_branch()
target_keys = [
@@ -668,7 +658,7 @@ def run(self):
core.nvprof_start()
core.nvprof_enable_record_event()

def step(_step_id, task_key="Default"):
def step(_step_id, task_key="Default") -> None:
# Paddle Profiler
if enable_profiling:
core.nvprof_nvtx_push(f"Training step {_step_id}")
@@ -886,7 +876,7 @@ def log_loss_valid(_task_key="Default"):
display_step_id % self.tensorboard_freq == 0 or display_step_id == 1
):
writer.add_scalar(f"{task_key}/lr", cur_lr, display_step_id)
writer.add_scalar(f"{task_key}/loss", loss, display_step_id)
writer.add_scalar(f"{task_key}/loss", loss.item(), display_step_id)

Check warning on line 879 in deepmd/pd/train/training.py

View check run for this annotation

Codecov / codecov/patch

deepmd/pd/train/training.py#L879

Added line #L879 was not covered by tests
for item in more_loss:
writer.add_scalar(
f"{task_key}/{item}", more_loss[item].item(), display_step_id
@@ -1195,21 +1185,49 @@ def get_single_model(
return model


def get_model_for_wrapper(_model_params):
def get_model_for_wrapper(_model_params, resuming=False):
if "model_dict" not in _model_params:
_model = get_single_model(
_model_params,
)
else:
_model = {}
model_keys = list(_model_params["model_dict"])
do_case_embd, case_embd_index = get_case_embd_config(_model_params)
for _model_key in model_keys:
_model[_model_key] = get_single_model(
_model_params["model_dict"][_model_key],
)
if do_case_embd and not resuming:
# only set case_embd when from scratch multitask training
_model[_model_key].set_case_embd(case_embd_index[_model_key])
return _model


def get_case_embd_config(_model_params):
assert (
"model_dict" in _model_params
), "Only support setting case embedding for multi-task model!"
model_keys = list(_model_params["model_dict"])
sorted_model_keys = sorted(model_keys)
numb_case_embd_list = [
_model_params["model_dict"][model_key]
.get("fitting_net", {})
.get("dim_case_embd", 0)
for model_key in sorted_model_keys
]
if not all(item == numb_case_embd_list[0] for item in numb_case_embd_list):
raise ValueError(
f"All models must have the same dimension of case embedding, while the settings are: {numb_case_embd_list}"
)
if numb_case_embd_list[0] == 0:
return False, {}
case_embd_index = {
model_key: idx for idx, model_key in enumerate(sorted_model_keys)
}
return True, case_embd_index


def model_change_out_bias(
_model,
_sample_func,
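For reference, an illustrative call of the new get_case_embd_config helper with a hypothetical two-branch config; as the code above shows, it requires a multi-task "model_dict", insists that every branch request the same fitting_net dim_case_embd, and numbers the branches over their sorted names only when that dimension is non-zero:

# assumes: from deepmd.pd.train.training import get_case_embd_config
model_params = {  # hypothetical two-branch multi-task configuration
    "model_dict": {
        "water": {"fitting_net": {"dim_case_embd": 2}},
        "metal": {"fitting_net": {"dim_case_embd": 2}},
    }
}
do_case_embd, case_embd_index = get_case_embd_config(model_params)
# do_case_embd is True and case_embd_index == {"metal": 0, "water": 1} (sorted keys);
# with dim_case_embd == 0 in every branch it would return (False, {}) instead.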
67 changes: 3 additions & 64 deletions deepmd/pd/train/wrapper.py
@@ -26,7 +26,7 @@ def __init__(
loss: paddle.nn.Layer | dict = None,
model_params=None,
shared_links=None,
):
) -> None:
"""Construct a DeePMD model wrapper.
Args:
@@ -64,74 +64,13 @@ def __init__(
self.loss[task_key] = loss[task_key]
self.inference_only = self.loss is None

def share_params(self, shared_links, resume=False):
def share_params(self, shared_links, resume=False) -> None:
"""
Share the parameters of classes following rules defined in shared_links during multitask training.
If not start from checkpoint (resume is False),
some separated parameters (e.g. mean and stddev) will be re-calculated across different classes.
"""
supported_types = ["descriptor", "fitting_net"]
for shared_item in shared_links:
class_name = shared_links[shared_item]["type"]
shared_base = shared_links[shared_item]["links"][0]
class_type_base = shared_base["shared_type"]
model_key_base = shared_base["model_key"]
shared_level_base = shared_base["shared_level"]
if "descriptor" in class_type_base:
if class_type_base == "descriptor":
base_class = self.model[model_key_base].get_descriptor()
elif "hybrid" in class_type_base:
raise NotImplementedError(
"Hybrid descriptor is not implemented yet"
)
else:
raise RuntimeError(f"Unknown class_type {class_type_base}!")
for link_item in shared_links[shared_item]["links"][1:]:
class_type_link = link_item["shared_type"]
model_key_link = link_item["model_key"]
shared_level_link = int(link_item["shared_level"])
assert (
shared_level_link >= shared_level_base
), "The shared_links must be sorted by shared_level!"
assert (
"descriptor" in class_type_link
), f"Class type mismatched: {class_type_base} vs {class_type_link}!"
if class_type_link == "descriptor":
link_class = self.model[model_key_link].get_descriptor()
elif "hybrid" in class_type_link:
raise NotImplementedError(
"Hybrid descriptor is not implemented yet"
)
else:
raise RuntimeError(f"Unknown class_type {class_type_link}!")
link_class.share_params(
base_class, shared_level_link, resume=resume
)
log.warning(
f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!"
)
else:
if hasattr(self.model[model_key_base], class_type_base):
base_class = self.model[model_key_base].__getattr__(class_type_base)
for link_item in shared_links[shared_item]["links"][1:]:
class_type_link = link_item["shared_type"]
model_key_link = link_item["model_key"]
shared_level_link = int(link_item["shared_level"])
assert (
shared_level_link >= shared_level_base
), "The shared_links must be sorted by shared_level!"
assert (
class_type_base == class_type_link
), f"Class type mismatched: {class_type_base} vs {class_type_link}!"
link_class = self.model[model_key_link].__getattr__(
class_type_link
)
link_class.share_params(
base_class, shared_level_link, resume=resume
)
log.warning(
f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!"
)
raise NotImplementedError("share_params is not implemented yet")

def forward(
self,
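The wrapper.py hunk replaces the whole parameter-sharing logic with an immediate failure, so any multi-task run that reaches share_params in the Paddle backend now stops early instead of silently sharing parameters. A hedged illustration of the resulting behavior, where wrapper and shared_links stand in for a constructed ModelWrapper and its sharing rules:

try:
    wrapper.share_params(shared_links, resume=False)  # placeholders, not real objects
except NotImplementedError as exc:
    print(exc)  # "share_params is not implemented yet"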
2 changes: 0 additions & 2 deletions source/tests/pd/model/test_autodiff.py
@@ -190,15 +190,13 @@ def setUp(self):
self.model = get_model(model_params).to(env.DEVICE)


@unittest.skip("Skip for not implemented yet")
class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest):
def setUp(self):
model_params = copy.deepcopy(model_dpa1)
self.type_split = True
self.model = get_model(model_params).to(env.DEVICE)


@unittest.skip("Skip for not implemented yet")
class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest):
def setUp(self):
model_params = copy.deepcopy(model_dpa1)
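With the two @unittest.skip decorators removed, the DPA-1 force and virial autodiff tests are exercised again. One way to run just this module locally, assuming pytest and a working Paddle build are installed (the path is taken from the diff header above):

import pytest

# Run only the re-enabled autodiff tests for the Paddle backend.
raise SystemExit(pytest.main(["-v", "source/tests/pd/model/test_autodiff.py"]))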
