diff --git a/README.md b/README.md index 9512b83..a317894 100644 --- a/README.md +++ b/README.md @@ -225,10 +225,14 @@ The input archive files can also be automatically removed on successful import/s with the (-r) flag. The last successfully completed import can be identified with the (-l) flag. +All previously imported datasets can be shown with the (-L) flag. + +Note that a dataset can normally only be imported ONCE. To force an import of an +already completed dataset, use the (-f) flag. ### Help Output ``` -usage: sat_import.py [-h] [-o ORG] -d DATE [-n] [-r] [-l] +usage: sat_import.py [-h] [-o ORG] -d DATE [-n] [-r] [-l] [-L] [-c] [-f] Performs Import of Default Content View. @@ -239,6 +243,9 @@ optional arguments: -n, --nosync Do not trigger a sync after extracting content -r, --remove Remove input files after import has completed -l, --last Show the last successfully completed import date + -L, --list List all successfully completed imports + -c, --count Display all package counts after import + -f, --force Force import of data if it has previously been done ``` ### Examples diff --git a/man/sat_import.8 b/man/sat_import.8 index dbe1736..aeb1879 100644 --- a/man/sat_import.8 +++ b/man/sat_import.8 @@ -24,7 +24,7 @@ The import process consists of the following steps: - The import dataset is extracted to the import filesystem location. .RE .RS 3 -- The existence of each repository within the import dataset is verified in Satellite. +- The existence of each repository within the import dataset is verified in Satellite. .RS 2 Any repositories found in the import that do not exist in Satellite will be indicated. .RE @@ -33,7 +33,7 @@ Any repositories found in the import that do not exist in Satellite will be indi - Satellite will perform a bulk repository sync of the repositories within the import dataset. 
.RE .RS 3 -- At the completion of the import sync, a package count of each imported repository will be +- At the completion of the import sync, a package count of each imported repository will be .RS 2 performed and compared against the count from the sync host Satellite, and discrepancies will be displayed. .RE @@ -62,7 +62,7 @@ file, but can be overridden with this option. .BR "-d", " --dataset" .I "DATASET" .RS 3 -Dataset to import. The dataset name consists of the export date and the export environment that was defined during the +Dataset to import. The dataset name consists of the export date and the export environment that was defined during the .I sat_export process, for example '2016-12-06_SAT1' .RE @@ -72,6 +72,11 @@ process, for example '2016-12-06_SAT1' Display the name of the last successfully imported dataset. .RE .PP +.BR "-L", " --list" +.RS 3 +Display all of the previously imported datasets. +.RE +.PP .BR "-n", " --nosync" .RS 3 Skip repository sync of extracted RPM packages. The default action is to run a repository sync @@ -82,21 +87,26 @@ on all imported repositories. Using this option will extract the import dataset .RS 3 Delete the import dataset files after a successful import and sync operation. By default .B sat_import -will leave the import dataset in the import location. +will leave the import dataset in the import location. .br If the import process detects a repository in the dataset that does not exist in the Satellite, -the removal of the import dataset is skipped regardless of the setting of this flag. This is +the removal of the import dataset is skipped regardless of the setting of this flag. This is because the new repository will need to be manually defined in the Satellite and a manual sync of that repository performed. .RE .PP .BR "-c", " --count" .RS 3 -Display the full repository package count after the sync has completed. By default, only repositories -that have mis-matching content counts will be displayed. 
This option forces the package count of ALL +Display the full repository package count after the sync has completed. By default, only repositories +that have mis-matching content counts will be displayed. This option forces the package count of ALL repositories to be shown, even if there is no mis-match. .RE - +.PP +.BR "-f", " --force" +.RS 3 +Normally the script will prevent the importing of a dataset that has already been imported. +However, using this option will force an import of the dataset to be performed. +.RE .SH EXAMPLES Check when the last import was performed: @@ -132,4 +142,3 @@ Perform an import using dataset 2016-12-06_SAT1, removing input files when done: .SH AUTHOR Geoff Gatward - diff --git a/sat_import.py b/sat_import.py index 44327d4..d284048 100644 --- a/sat_import.py +++ b/sat_import.py @@ -18,8 +18,8 @@ def get_inputfiles(dataset): """ Verify the input files exist and are valid. - 'dataset' is a date (YYYY-MM-DD) provided by the user - date is in the filename of the archive - Returned 'basename' is the full export filename (sat6_export_YYYY-MM-DD) + 'dataset' is a date (YYYY-MM-DD_ENV) provided by the user - date is in the filename of the archive + Returned 'basename' is the full export filename (sat6_export_YYYY-MM-DD_ENV) """ basename = 'sat6_export_' + dataset shafile = basename + '.sha256' @@ -301,8 +301,12 @@ def main(args): required=False, action="store_true") parser.add_argument('-l', '--last', help='Display the last successful import performed', required=False, action="store_true") + parser.add_argument('-L', '--list', help='List all successfully completed imports', + required=False, action="store_true") parser.add_argument('-c', '--count', help='Display all package counts after import', required=False, action="store_true") + parser.add_argument('-f', '--force', help='Force import of data if it has previously been done', + required=False, action="store_true") args = parser.parse_args() # Set our script variables from the input args 
@@ -318,13 +322,25 @@ def main(args): # Get the org_id (Validates our connection to the API) org_id = helpers.get_org_id(org_name) - # Display the last successful import - if args.last: + imports = [] + # Read the last imports data + if os.path.exists(vardir + '/imports.pkl'): + imports = pickle.load(open(vardir + '/imports.pkl', 'rb')) + # If we have a string we convert to a list. This should only occur the first time we + # migrate from the original string version of the pickle. + if type(imports) is str: + imports = imports.split() + last_import = imports[-1] + # Display the last successful import(s) + if args.last or args.list: if os.path.exists(vardir + '/imports.pkl'): - last_import = pickle.load(open(vardir + '/imports.pkl', 'rb')) - msg = "Last successful import was " + last_import - helpers.log_msg(msg, 'INFO') - print msg + if args.last: + msg = "Last successful import was " + last_import + helpers.log_msg(msg, 'INFO') + print msg + if args.list: + print "Completed imports:\n----------------" + for item in imports: print item else: msg = "Import has never been performed" helpers.log_msg(msg, 'INFO') @@ -335,6 +351,12 @@ def main(args): if args.dataset is None: parser.error("--dataset is required") + # If we have already imported this dataset let the user know + if dataset in imports: + if not args.force: + msg = "Dataset " + dataset + " has already been imported. Use --force if you really want to do this." 
+ helpers.log_msg(msg, 'WARNING') + sys.exit(1) # Figure out if we have the specified input fileset basename = get_inputfiles(dataset) @@ -393,11 +415,12 @@ def main(args): msg = "Import Complete" helpers.log_msg(msg, 'INFO') - # Save the last completed import data + # Save the last completed import data (append to existing pickle) os.chdir(script_dir) if not os.path.exists(vardir): os.makedirs(vardir) - pickle.dump(dataset, open(vardir + '/imports.pkl', "wb")) + imports.append(dataset) + pickle.dump(imports, open(vardir + '/imports.pkl', "wb")) # And exit. sys.exit(excode)