{KeyVault} Track 2 mgmt-plane adoption #14150
patch_models.py

@@ -35,6 +35,11 @@
 from msrest.exceptions import HttpOperationError
 """

+track2_header = copyright_header + b"""import msrest.serialization
+from typing import Dict, List, Optional, Union
+from msrest.exceptions import HttpOperationError
+"""
+
 paging_header = copyright_header + b"""from msrest.paging import Paged
 """

@@ -59,26 +64,27 @@ def parse_input(input_parameter):
     return package_name, module_name


-def solve_mro(models):
+def solve_mro(models, track2=False):
     for models_module in models:
         models_path = models_module.__path__[0]
         _LOGGER.info("Working on %s", models_path)
-        if Path(models_path, "models_py3.py").exists():
+
+        if not track2 and Path(models_path, "models_py3.py").exists():
             _LOGGER.info("Skipping since already patched")
             return

         # Build the new files in a temp folder
         with tempfile.TemporaryDirectory() as temp_folder:
             final_models_path = Path(temp_folder, "models")
             final_models_path.mkdir()
-            solve_one_model(models_module, final_models_path)
+            solve_one_model(models_module, final_models_path, track2=track2)

             # Switch the files
             shutil.rmtree(models_path)
             shutil.move(final_models_path, models_path)


-def solve_one_model(models_module, output_folder):
+def solve_one_model(models_module, output_folder, track2=False):
     """Will build the compacted models in the output_folder"""

     models_classes = [

@@ -110,8 +116,8 @@ def solve_one_model(models_module, output_folder):
     else:
         enum_file_module_name = None

-    write_model_file(Path(output_folder, "models_py3.py"), models_classes)
-    write_model_file(Path(output_folder, "models.py"), py2_models_classes)
+    write_model_file(Path(output_folder, "models_py3.py"), models_classes, track2=track2)
+    write_model_file(Path(output_folder, "models.py"), py2_models_classes, track2=track2)
     write_paging_file(Path(output_folder, "paged_models.py"), paged_models_classes)
     write_init(
         Path(output_folder, "__init__.py"),

@@ -122,9 +128,12 @@ def solve_one_model(models_module, output_folder):
     )


-def write_model_file(output_file_path, classes_to_write):
+def write_model_file(output_file_path, classes_to_write, track2=False):
     with open(output_file_path, "bw") as write_fd:
-        write_fd.write(header)
+        if track2:
+            write_fd.write(track2_header)
+        else:
+            write_fd.write(header)

         for model in classes_to_write:
             _, model_file_path, _ = model

@@ -191,7 +200,7 @@ def find_models_to_change(module_name):
     return [
         importlib.import_module('.' + label + '.models', main_module.__name__)
         for (_, label, ispkg) in pkgutil.iter_modules(main_module.__path__)
-        if ispkg
+        if ispkg and label != 'aio'
     ]


@@ -220,7 +229,15 @@ def find_autorest_generated_folder(module_prefix="azure.mgmt"):
 if __name__ == "__main__":
     logging.basicConfig(level=logging.INFO)

+    track2_packages = [
+        'azure.mgmt.keyvault'
+    ]
     prefix = sys.argv[1] if len(sys.argv) >= 2 else "azure.mgmt"
     for autorest_package in find_autorest_generated_folder(prefix):
         models = find_models_to_change(autorest_package)
-        solve_mro(models)
+        track2 = False
+        for track2_pkg in track2_packages:
+            if autorest_package.startswith(track2_pkg):
+                track2 = True
+                break
+        solve_mro(models, track2=track2)

Review comments on the track2_packages list:
- Will future track 2 mgmt SDKs need to add this line also?
- @yungezz Yes, I think. Anyway, this is a hack script and maybe we should totally understand it and try to deprecate it.
- This script was involved at this point: #7061. The motivation is to resolve SDK model references. I think track 2 SDKs won't have the reference related issue.
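As the thread notes, every future track 2 management package would need to be listed here. For readers of the loop above, a minimal sketch of the equivalent prefix check (illustration only; same hypothetical track2_packages list as in the diff):

# Illustration only: compact equivalent of the track 2 detection loop above.
track2_packages = ['azure.mgmt.keyvault']

def is_track2(autorest_package):
    # A package counts as track 2 if its dotted name starts with a listed prefix.
    return any(autorest_package.startswith(pkg) for pkg in track2_packages)

# e.g. is_track2('azure.mgmt.keyvault.v2019_09_01') -> True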
@@ -636,6 +636,10 @@ def get_provisioning_state(instance):
     properties = getattr(instance, 'properties', None)
     if properties:
         provisioning_state = getattr(properties, 'provisioning_state', None)
+        # some SDK, like keyvault, has 'provisioningState' under 'properties.additional_properties'
+        if not provisioning_state:
+            additional_properties = getattr(properties, 'additional_properties', {})
+            provisioning_state = additional_properties.get('provisioningState')
     return provisioning_state


 def handler(args):

Review comments:
- For this case, is …
- @arrownj This is a bug in core. The …
- Better to add some comments here to explain it.
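The fallback works because msrest-style models that declare an additional_properties bag keep unrecognized response keys there. A minimal, self-contained sketch of that situation (FakeProperties is a hypothetical stand-in, not the SDK class):

# Hypothetical stand-in for an SDK properties model that does not declare
# 'provisioning_state'; the raw 'provisioningState' key from the service
# response ends up in 'additional_properties' instead.
class FakeProperties:
    provisioning_state = None
    additional_properties = {'provisioningState': 'Succeeded'}

properties = FakeProperties()
state = getattr(properties, 'provisioning_state', None)
if not state:
    # Same fallback as the patch above.
    state = getattr(properties, 'additional_properties', {}).get('provisioningState')
assert state == 'Succeeded'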
@@ -388,3 +388,12 @@
         az keyvault update --enabled-for-disk-encryption true --name MyKeyVault --resource-group MyResourceGroup
     crafted: true
 """
+
+helps['keyvault wait'] = """
+type: command
+short-summary: Place the CLI in a waiting state until a condition of the vault is met.
+examples:
+  - name: Pause CLI until the vault is created.
+    text: |
+        az keyvault wait --name MyVault --created
+"""

Review comments:
- Is this a new command, or just a fix of the help file?
- @yungezz This is for LRO.
@@ -242,13 +242,14 @@ def validate_deleted_vault_name(cmd, ns):
     if not vault:
         raise CLIError('No deleted vault was found with name ' + ns.vault_name)

-    setattr(ns, 'resource_group_name', getattr(ns, 'resource_group_name', None) or id_comps['resource_group'])
-
-    # resource_group_name must match the resource group of the deleted vault
-    if id_comps['resource_group'] != ns.resource_group_name:
-        raise CLIError("The specified resource group does not match that of the deleted vault %s. The vault "
-                       "must be recovered to the original resource group %s."
-                       % (vault_name, id_comps['resource_group']))
+    if 'purge' not in cmd.name:
+        setattr(ns, 'resource_group_name', getattr(ns, 'resource_group_name', None) or id_comps['resource_group'])
+
+        # resource_group_name must match the resource group of the deleted vault
+        if id_comps['resource_group'] != ns.resource_group_name:
+            raise CLIError("The specified resource group does not match that of the deleted vault %s. The vault "
+                           "must be recovered to the original resource group %s."
+                           % (vault_name, id_comps['resource_group']))


 def validate_x509_certificate_chain(ns):

Review comments:
- What's …
- @arrownj …
- The code below line 245 is for automatically setting …
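A hedged reading of why purge is exempted: purging a soft-deleted vault is addressed by vault name and location rather than by resource group, so defaulting and validating resource_group_name only matters on the recover path. An annotated sketch of the new branch (the comments are my assumptions, not part of the diff):

# Annotated sketch of the branch above, for illustration.
if 'purge' not in cmd.name:
    # Recover-style commands target the original resource group: default it from
    # the deleted vault's id, then require that any user-supplied value matches.
    setattr(ns, 'resource_group_name',
            getattr(ns, 'resource_group_name', None) or id_comps['resource_group'])
    if id_comps['resource_group'] != ns.resource_group_name:
        raise CLIError("The specified resource group does not match that of the deleted vault %s. "
                       "The vault must be recovered to the original resource group %s."
                       % (vault_name, id_comps['resource_group']))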
@@ -24,6 +24,7 @@
 def load_command_table(self, _):
     mgmt_api_version = str(get_api_version(self.cli_ctx, ResourceType.MGMT_KEYVAULT))
     mgmt_api_version = mgmt_api_version.replace('-', '_')
+
     data_api_version = str(get_api_version(self.cli_ctx, ResourceType.DATA_KEYVAULT))
     data_api_version = data_api_version.replace('.', '_')
     data_api_version = data_api_version.replace('-', '_')

@@ -61,9 +62,9 @@ def load_command_table(self, _):

     # Management Plane Commands
     with self.command_group('keyvault', kv_vaults_sdk, client_factory=keyvault_client_vaults_factory) as g:
-        g.custom_command('create', 'create_keyvault',
+        g.custom_command('create', 'create_keyvault', supports_no_wait=True,
                          doc_string_source='azure.mgmt.keyvault.v' + mgmt_api_version + '.models#VaultProperties')
-        g.custom_command('recover', 'recover_keyvault')
+        g.custom_command('recover', 'recover_keyvault', supports_no_wait=True)
         g.custom_command('list', 'list_keyvault')
         g.show_command('show', 'get')
         g.command('delete', 'delete', deprecate_info=g.deprecate(

@@ -73,21 +74,23 @@ def load_command_table(self, _):
             'the soft deleted state. Please see the following documentation for additional '
             'guidance.\n'
             'https://docs.microsoft.com/en-us/azure/key-vault/general/soft-delete-overview'))
-        g.command('purge', 'purge_deleted')
-        g.custom_command('set-policy', 'set_policy')
-        g.custom_command('delete-policy', 'delete_policy')
+        g.command('purge', 'begin_purge_deleted', supports_no_wait=True)
+        g.custom_command('set-policy', 'set_policy', supports_no_wait=True)
+        g.custom_command('delete-policy', 'delete_policy', supports_no_wait=True)
         g.command('list-deleted', 'list_deleted')
         g.generic_update_command(
             'update', setter_name='update_keyvault_setter', setter_type=kv_vaults_custom,
             custom_func_name='update_keyvault',
-            doc_string_source='azure.mgmt.keyvault.v' + mgmt_api_version + '.models#VaultProperties')
+            doc_string_source='azure.mgmt.keyvault.v' + mgmt_api_version + '.models#VaultProperties',
+            supports_no_wait=True)
+        g.wait_command('wait')

     with self.command_group('keyvault network-rule',
                             kv_vaults_sdk,
                             min_api='2018-02-14',
                             client_factory=keyvault_client_vaults_factory) as g:
-        g.custom_command('add', 'add_network_rule')
-        g.custom_command('remove', 'remove_network_rule')
+        g.custom_command('add', 'add_network_rule', supports_no_wait=True)
+        g.custom_command('remove', 'remove_network_rule', supports_no_wait=True)
         g.custom_command('list', 'list_network_rules')

     with self.command_group('keyvault private-endpoint-connection',

Review comments on lines +92 to +93 (the network-rule add/remove commands):
- In these scenarios, is there a wait command to wait until the condition is satisfied?
- @Juliehzl Good catch. Added.

@@ -99,7 +102,7 @@ def load_command_table(self, _):
                          validator=validate_private_endpoint_connection_id)
         g.custom_command('reject', 'reject_private_endpoint_connection', supports_no_wait=True,
                          validator=validate_private_endpoint_connection_id)
-        g.command('delete', 'delete', validator=validate_private_endpoint_connection_id)
+        g.command('delete', 'begin_delete', validator=validate_private_endpoint_connection_id, supports_no_wait=True)
         g.show_command('show', 'get', validator=validate_private_endpoint_connection_id)
         g.wait_command('wait', validator=validate_private_endpoint_connection_id)

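For readers unfamiliar with the no-wait plumbing, a minimal sketch of the pattern a custom command marked supports_no_wait typically follows, using azure-cli's sdk_no_wait helper (the function name and client shape here are illustrative assumptions, not the exact code in this PR):

from azure.cli.core.util import sdk_no_wait

def create_keyvault_sketch(client, resource_group_name, vault_name, parameters, no_wait=False):
    # sdk_no_wait passes polling=False to the track 2 begin_* call when --no-wait
    # is used, so the command returns without waiting for the LRO to finish;
    # 'az keyvault wait' can then poll until the vault reaches the desired state.
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, vault_name, parameters)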
Review comments on patch_models.py:
- This is quite hacky. Is there a better way to do it, like feedback to the SDK team, maybe adding it as an option in track 2 generation?
- @yungezz Actually the SDK is great; the problem is within this patch_models.py script. This script is totally a hack script.