diff --git a/heudiconv/bids.py b/heudiconv/bids.py
index 1bbfcf6e..95692787 100644
--- a/heudiconv/bids.py
+++ b/heudiconv/bids.py
@@ -240,6 +240,27 @@ def add_participant_record(studydir, subject, age, sex):
             known_subjects = {l.split('\t')[0] for l in f.readlines()}
         if participant_id in known_subjects:
             return
+    else:
+        # Populate participants.json (an optional file describing the column names
+        # in participants.tsv). Auto-generating it keeps the BIDS validator happy.
+        participants_json = op.join(studydir, 'participants.json')
+        if not op.lexists(participants_json):
+            save_json(participants_json,
+                OrderedDict([
+                    ("participant_id", OrderedDict([
+                        ("Description", "Participant identifier")])),
+                    ("age", OrderedDict([
+                        ("Description", "Age in years (TODO: verify), as recorded in"
+                            " the initial session; may not be correct for other sessions")])),
+                    ("sex", OrderedDict([
+                        ("Description", "self-rated by participant, M for male/F for "
+                            "female (TODO: verify)")])),
+                    ("group", OrderedDict([
+                        ("Description", "(TODO: adjust - by default everyone is in "
+                            "the control group)")])),
+                ]),
+                sort_keys=False,
+                indent=2)
     # Add a new participant
     with open(participants_tsv, 'a') as f:
         f.write(
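
Note (not part of the patch): a minimal preview of the sidecar this new else branch writes, assuming heudiconv's save_json emits plain JSON from the mapping above; only the first column is shown here, the remaining entries follow the same shape.

# Sketch only: preview the participants.json content with the stdlib json module.
# Assumes save_json() writes ordinary JSON; exact whitespace may differ.
import json
from collections import OrderedDict

columns = OrderedDict([
    ("participant_id", OrderedDict([("Description", "Participant identifier")])),
    # ... "age", "sex" and "group" follow the same {"Description": ...} shape
])
print(json.dumps(columns, indent=2))
# {
#   "participant_id": {
#     "Description": "Participant identifier"
#   }
# }
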
@@ -311,7 +332,8 @@ def save_scans_key(item, bids_files):
 
 def add_rows_to_scans_keys_file(fn, newrows):
     """
-    Add new rows to file fn for scans key filename
+    Add new rows to the scans key file fn and generate an accompanying JSON
+    descriptor to make the BIDS validator happy.
 
     Parameters
     ----------
@@ -334,6 +356,25 @@ def add_rows_to_scans_keys_file(fn, newrows):
         os.unlink(fn)
     else:
         fnames2info = newrows
+        # Populate _scans.json (an optional file describing the column names
+        # in _scans.tsv). Auto-generating it keeps the BIDS validator happy.
+        scans_json = '.'.join(fn.split('.')[:-1] + ['json'])
+        if not op.lexists(scans_json):
+            save_json(scans_json,
+                OrderedDict([
+                    ("filename", OrderedDict([
+                        ("Description", "Name of the NIfTI file")])),
+                    ("acq_time", OrderedDict([
+                        ("LongName", "Acquisition time"),
+                        ("Description", "Acquisition time of the particular scan")])),
+                    ("operator", OrderedDict([
+                        ("Description", "Name of the operator")])),
+                    ("randstr", OrderedDict([
+                        ("LongName", "Random string"),
+                        ("Description", "md5 hash of UIDs")])),
+                ]),
+                sort_keys=False,
+                indent=2)
 
     header = ['filename', 'acq_time', 'operator', 'randstr']
     # prepare all the data rows
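
Aside (not part of the patch): the scans_json path above is obtained by swapping the trailing .tsv extension of fn for .json; a quick sketch with a hypothetical filename:

# Illustration only: how the scans_json path in the hunk above is derived.
fn = 'sub-01/ses-01/sub-01_ses-01_scans.tsv'   # hypothetical example path
scans_json = '.'.join(fn.split('.')[:-1] + ['json'])
print(scans_json)   # sub-01/ses-01/sub-01_ses-01_scans.json
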
diff --git a/heudiconv/tests/test_heuristics.py b/heudiconv/tests/test_heuristics.py
index c9485c6e..f36bbb4c 100644
--- a/heudiconv/tests/test_heuristics.py
+++ b/heudiconv/tests/test_heuristics.py
@@ -165,7 +165,13 @@ def test_notop(tmpdir, bidsoptions):
     runner(args)
 
     assert op.exists(pjoin(tmppath, 'Halchenko/Yarik/950_bids_test4'))
-    for fname in ['CHANGES', 'dataset_description.json', 'participants.tsv', 'README']:
+    for fname in [
+        'CHANGES',
+        'dataset_description.json',
+        'participants.tsv',
+        'participants.json',
+        'README',
+    ]:
         if 'notop' in bidsoptions:
             assert not op.exists(pjoin(tmppath, 'Halchenko/Yarik/950_bids_test4', fname))
         else:
diff --git a/heudiconv/tests/test_main.py b/heudiconv/tests/test_main.py
index 7dcd32ed..f4b77001 100644
--- a/heudiconv/tests/test_main.py
+++ b/heudiconv/tests/test_main.py
@@ -22,6 +22,7 @@
 from os.path import join as opj
 from six.moves import StringIO
 import stat
+import os.path as op
 
 
 @patch('sys.stdout', new_callable=StringIO)
@@ -205,6 +206,7 @@ def _check_rows(fn, rows):
         assert dates == sorted(dates)
 
     _check_rows(fn, rows)
+    assert op.exists(opj(tmpdir.strpath, 'file.json'))
     # add a new one
     extra_rows = {
         'a_new_file.nii.gz': ['2016adsfasd23', '', 'fasadfasdf'],
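
A possible follow-up (not in this patch) would be to also check the sidecar's content, e.g. that it describes exactly the columns written by bids.py; a hedged sketch, assuming it runs inside the same test where tmpdir and opj are available:

# Sketch of an additional assertion (not part of the patch): the sidecar next to
# the scans .tsv should describe the same columns added in bids.py.
import json

with open(opj(tmpdir.strpath, 'file.json')) as f:
    descriptions = json.load(f)
assert set(descriptions) == {'filename', 'acq_time', 'operator', 'randstr'}
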