Skip to content

Commit

Permalink
examples: only use keep_linebreaks when reading TXT files (huggingface#13320)
Browse files Browse the repository at this point in the history

* examples: only use keep_linebreaks when reading TXT files for all CLM examples

* examples: only use keep_linebreaks when reading TXT files for all CLM examples

* examples: only use keep_linebreaks when reading TXT files for all CLM examples
  • Loading branch information
stefan-it authored Aug 28, 2021
1 parent b6f332e commit 4046e66
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 14 deletions.
10 changes: 6 additions & 4 deletions examples/flax/language-modeling/run_clm_flax.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ class DataTrainingArguments:
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using CSV/JSON/TXT files or not."}
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)

def __post_init__(self):
Expand Down Expand Up @@ -305,29 +305,31 @@ def main():
)
else:
data_files = {}
dataset_args = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, **dataset_args)

if "validation" not in dataset.keys():
dataset["validation"] = load_dataset(
extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
**dataset_args,
)
dataset["train"] = load_dataset(
extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
**dataset_args,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
Expand Down
10 changes: 6 additions & 4 deletions examples/pytorch/language-modeling/run_clm.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ class DataTrainingArguments:
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using CSV/JSON/TXT files or not."}
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)

def __post_init__(self):
Expand Down Expand Up @@ -269,6 +269,7 @@ def main():
)
else:
data_files = {}
dataset_args = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
Expand All @@ -280,22 +281,23 @@ def main():
)
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
keep_linebreaks=data_args.keep_linebreaks,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
**dataset_args,
)

# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
Expand Down
10 changes: 6 additions & 4 deletions examples/pytorch/language-modeling/run_clm_no_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ def parse_args():
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using CSV/JSON/TXT files."
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
)

args = parser.parse_args()
Expand Down Expand Up @@ -248,27 +248,29 @@ def main():
)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files)
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
keep_linebreaks=not args.no_keep_linebreaks,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
keep_linebreaks=not args.no_keep_linebreaks,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
**dataset_args,
)

# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
Expand Down
6 changes: 4 additions & 2 deletions examples/tensorflow/language-modeling/run_clm.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ class DataTrainingArguments:
},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using CSV/JSON/TXT files or not."}
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)

def __post_init__(self):
Expand Down Expand Up @@ -321,14 +321,16 @@ def main():
)
else:
data_files = {}
dataset_args = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, keep_linebreaks=data_args.keep_linebreaks, data_files=data_files)
dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
Expand Down

0 comments on commit 4046e66

Please sign in to comment.