From db956dba58e08638326a958c1647df3741178d4f Mon Sep 17 00:00:00 2001 From: chkp-royl Date: Sun, 18 Feb 2024 15:29:39 +0200 Subject: [PATCH 1/5] Fix bug in resource_checkpoint_management_access_rule.go --- ...ource_checkpoint_management_access_rule.go | 123 ++++-------------- ...ource_checkpoint_management_access_rule.go | 113 ++++------------ checkpoint/utils.go | 35 +++++ ..._management_data_access_rule.html.markdown | 33 +++-- website/docs/index.html.markdown | 104 ++++++++------- ...point_management_access_rule.html.markdown | 100 +++++++++++--- 6 files changed, 250 insertions(+), 258 deletions(-) diff --git a/checkpoint/data_source_checkpoint_management_access_rule.go b/checkpoint/data_source_checkpoint_management_access_rule.go index 61bec0f1..a5dcde3f 100644 --- a/checkpoint/data_source_checkpoint_management_access_rule.go +++ b/checkpoint/data_source_checkpoint_management_access_rule.go @@ -5,7 +5,6 @@ import ( checkpoint "github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" - "reflect" "strconv" ) @@ -254,6 +253,14 @@ func dataSourceManagementAccessRule() *schema.Resource { Computed: true, Description: "Comments string.", }, + "fields_with_uid_identifier": { + Type: schema.TypeSet, + Optional: true, + Description: "List of resource fields that will use object UIDs as object identifiers. 
Default is object name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, }, } } @@ -296,7 +303,11 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if v := accessRule["action"]; v != nil { - _ = d.Set("action", v.(map[string]interface{})["name"]) + actionId := resolveObjectIdentifier("action", accessRule["action"], d) + if actionId == "Inner Layer" { + actionId = "Apply Layer" + } + _ = d.Set("action", actionId) } if accessRule["action-settings"] != nil { @@ -319,20 +330,8 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if accessRule["content"] != nil { - contentJson := accessRule["content"].([]interface{}) - contentIds := make([]string, 0) - if len(contentJson) > 0 { - for _, content := range contentJson { - content := content.(map[string]interface{}) - contentIds = append(contentIds, content["name"].(string)) - } - } - _, contentInConf := d.GetOk("content") - if contentIds[0] == "Any" && !contentInConf { - _ = d.Set("content", []interface{}{}) - } else { - _ = d.Set("content", contentIds) - } + contentIds := resolveListOfIdentifiers("content", accessRule["content"], d) + _ = d.Set("content", contentIds) } else { _ = d.Set("content", nil) } @@ -362,33 +361,14 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e if v, _ := customFieldsMap["field-3"]; v != nil { customFieldsMapToReturn["field_3"] = v } - - _, customFieldsInConf := d.GetOk("custom_fields") - defaultCustomField := map[string]interface{}{"field_1": "", "field_2": "", "field_3": ""} - if reflect.DeepEqual(defaultCustomField, customFieldsMapToReturn) && !customFieldsInConf { - _ = d.Set("custom_fields", map[string]interface{}{}) - } else { - _ = d.Set("custom_fields", customFieldsMapToReturn) - } + _ = d.Set("custom_fields", customFieldsMapToReturn) } else { _ = d.Set("custom_fields", nil) } if accessRule["destination"] != nil { - destinationJson := 
accessRule["destination"].([]interface{}) - destinationIds := make([]string, 0) - if len(destinationJson) > 0 { - for _, destination := range destinationJson { - destination := destination.(map[string]interface{}) - destinationIds = append(destinationIds, destination["name"].(string)) - } - } - _, destinationInConf := d.GetOk("destination") - if destinationIds[0] == "Any" && !destinationInConf { - _ = d.Set("destination", []interface{}{}) - } else { - _ = d.Set("destination", destinationIds) - } + destinationIds := resolveListOfIdentifiers("destination", accessRule["destination"], d) + _ = d.Set("destination", destinationIds) } if v := accessRule["destination-negate"]; v != nil { @@ -404,37 +384,13 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if accessRule["install-on"] != nil { - installOnJson := accessRule["install-on"].([]interface{}) - installOnJsonIds := make([]string, 0) - if len(installOnJson) > 0 { - for _, installOn := range installOnJson { - installOn := installOn.(map[string]interface{}) - installOnJsonIds = append(installOnJsonIds, installOn["name"].(string)) - } - } - _, installOnInConf := d.GetOk("install_on") - if installOnJsonIds[0] == "Policy Targets" && !installOnInConf { - _ = d.Set("install_on", []interface{}{}) - } else { - _ = d.Set("install_on", installOnJsonIds) - } + installOnIds := resolveListOfIdentifiers("install-on", accessRule["install-on"], d) + _ = d.Set("install_on", installOnIds) } if accessRule["service"] != nil { - serviceJson := accessRule["service"].([]interface{}) - serviceJsonIds := make([]string, 0) - if len(serviceJson) > 0 { - for _, service := range serviceJson { - service := service.(map[string]interface{}) - serviceJsonIds = append(serviceJsonIds, service["name"].(string)) - } - } - _, serviceInConf := d.GetOk("service") - if serviceJsonIds[0] == "Any" && !serviceInConf { - _ = d.Set("service", []interface{}{}) - } else { - _ = d.Set("service", serviceJsonIds) - } + serviceIds := 
resolveListOfIdentifiers("service", accessRule["service"], d) + _ = d.Set("service", serviceIds) } if v := accessRule["service-negate"]; v != nil { @@ -442,20 +398,8 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if accessRule["source"] != nil { - sourceJson := accessRule["source"].([]interface{}) - sourceIds := make([]string, 0) - if len(sourceJson) > 0 { - for _, source := range sourceJson { - source := source.(map[string]interface{}) - sourceIds = append(sourceIds, source["name"].(string)) - } - } - _, sourceInConf := d.GetOk("source") - if sourceIds[0] == "Any" && !sourceInConf { - _ = d.Set("source", []interface{}{}) - } else { - _ = d.Set("source", sourceIds) - } + sourceIds := resolveListOfIdentifiers("source", accessRule["source"], d) + _ = d.Set("source", sourceIds) } if v := accessRule["source-negate"]; v != nil { @@ -463,20 +407,8 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if accessRule["time"] != nil { - timeJson := accessRule["time"].([]interface{}) - timeIds := make([]string, 0) - if len(timeJson) > 0 { - for _, time := range timeJson { - time := time.(map[string]interface{}) - timeIds = append(timeIds, time["name"].(string)) - } - } - _, timeInConf := d.GetOk("time") - if timeIds[0] == "Any" && !timeInConf { - _ = d.Set("time", []interface{}{}) - } else { - _ = d.Set("time", timeIds) - } + timeIds := resolveListOfIdentifiers("time", accessRule["time"], d) + _ = d.Set("time", timeIds) } if accessRule["track"] != nil { @@ -551,7 +483,8 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if v := accessRule["vpn"]; v != nil { - _ = d.Set("vpn", v.([]interface{})[0].(map[string]interface{})["name"]) + vpnId := resolveObjectIdentifier("vpn", v.([]interface{})[0], d) + _ = d.Set("vpn", vpnId) } if v := accessRule["comments"]; v != nil { diff --git a/checkpoint/resource_checkpoint_management_access_rule.go 
b/checkpoint/resource_checkpoint_management_access_rule.go index f6a25133..9f3194f7 100644 --- a/checkpoint/resource_checkpoint_management_access_rule.go +++ b/checkpoint/resource_checkpoint_management_access_rule.go @@ -312,6 +312,14 @@ func resourceManagementAccessRule() *schema.Resource { Optional: true, Description: "Comments string.", }, + "fields_with_uid_identifier": { + Type: schema.TypeSet, + Optional: true, + Description: "List of resource fields that will use object UIDs as object identifiers. Default is object name.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, }, } } @@ -544,7 +552,11 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if v := accessRule["action"]; v != nil { - _ = d.Set("action", v.(map[string]interface{})["name"]) + actionId := resolveObjectIdentifier("action", accessRule["action"], d) + if actionId == "Inner Layer" { + actionId = "Apply Layer" + } + _ = d.Set("action", actionId) } if accessRule["action-settings"] != nil { @@ -567,20 +579,8 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if accessRule["content"] != nil { - contentJson := accessRule["content"].([]interface{}) - contentIds := make([]string, 0) - if len(contentJson) > 0 { - for _, content := range contentJson { - content := content.(map[string]interface{}) - contentIds = append(contentIds, content["name"].(string)) - } - } - _, contentInConf := d.GetOk("content") - if contentIds[0] == "Any" && !contentInConf { - _ = d.Set("content", []interface{}{}) - } else { - _ = d.Set("content", contentIds) - } + contentIds := resolveListOfIdentifiers("content", accessRule["content"], d) + _ = d.Set("content", contentIds) } else { _ = d.Set("content", nil) } @@ -623,20 +623,8 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if accessRule["destination"] != nil { - destinationJson := accessRule["destination"].([]interface{}) - destinationIds := make([]string, 0) - if 
len(destinationJson) > 0 { - for _, destination := range destinationJson { - destination := destination.(map[string]interface{}) - destinationIds = append(destinationIds, destination["name"].(string)) - } - } - _, destinationInConf := d.GetOk("destination") - if destinationIds[0] == "Any" && !destinationInConf { - _ = d.Set("destination", []interface{}{}) - } else { - _ = d.Set("destination", destinationIds) - } + destinationIds := resolveListOfIdentifiers("destination", accessRule["destination"], d) + _ = d.Set("destination", destinationIds) } if v := accessRule["destination-negate"]; v != nil { @@ -652,37 +640,13 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if accessRule["install-on"] != nil { - installOnJson := accessRule["install-on"].([]interface{}) - installOnJsonIds := make([]string, 0) - if len(installOnJson) > 0 { - for _, installOn := range installOnJson { - installOn := installOn.(map[string]interface{}) - installOnJsonIds = append(installOnJsonIds, installOn["name"].(string)) - } - } - _, installOnInConf := d.GetOk("install_on") - if installOnJsonIds[0] == "Policy Targets" && !installOnInConf { - _ = d.Set("install_on", []interface{}{}) - } else { - _ = d.Set("install_on", installOnJsonIds) - } + installOnIds := resolveListOfIdentifiers("install-on", accessRule["install-on"], d) + _ = d.Set("install_on", installOnIds) } if accessRule["service"] != nil { - serviceJson := accessRule["service"].([]interface{}) - serviceJsonIds := make([]string, 0) - if len(serviceJson) > 0 { - for _, service := range serviceJson { - service := service.(map[string]interface{}) - serviceJsonIds = append(serviceJsonIds, service["name"].(string)) - } - } - _, serviceInConf := d.GetOk("service") - if serviceJsonIds[0] == "Any" && !serviceInConf { - _ = d.Set("service", []interface{}{}) - } else { - _ = d.Set("service", serviceJsonIds) - } + serviceIds := resolveListOfIdentifiers("service", accessRule["service"], d) + _ = d.Set("service", 
serviceIds) } if v := accessRule["service-negate"]; v != nil { @@ -690,20 +654,8 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if accessRule["source"] != nil { - sourceJson := accessRule["source"].([]interface{}) - sourceIds := make([]string, 0) - if len(sourceJson) > 0 { - for _, source := range sourceJson { - source := source.(map[string]interface{}) - sourceIds = append(sourceIds, source["name"].(string)) - } - } - _, sourceInConf := d.GetOk("source") - if sourceIds[0] == "Any" && !sourceInConf { - _ = d.Set("source", []interface{}{}) - } else { - _ = d.Set("source", sourceIds) - } + sourceIds := resolveListOfIdentifiers("source", accessRule["source"], d) + _ = d.Set("source", sourceIds) } if v := accessRule["source-negate"]; v != nil { @@ -711,20 +663,8 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if accessRule["time"] != nil { - timeJson := accessRule["time"].([]interface{}) - timeIds := make([]string, 0) - if len(timeJson) > 0 { - for _, time := range timeJson { - time := time.(map[string]interface{}) - timeIds = append(timeIds, time["name"].(string)) - } - } - _, timeInConf := d.GetOk("time") - if timeIds[0] == "Any" && !timeInConf { - _ = d.Set("time", []interface{}{}) - } else { - _ = d.Set("time", timeIds) - } + timeIds := resolveListOfIdentifiers("time", accessRule["time"], d) + _ = d.Set("time", timeIds) } if accessRule["track"] != nil { @@ -807,7 +747,8 @@ func readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if v := accessRule["vpn"]; v != nil { - _ = d.Set("vpn", v.([]interface{})[0].(map[string]interface{})["name"]) + vpnId := resolveObjectIdentifier("vpn", v.([]interface{})[0], d) + _ = d.Set("vpn", vpnId) } if v := accessRule["comments"]; v != nil { diff --git a/checkpoint/utils.go b/checkpoint/utils.go index 4633e4db..a3c8ca52 100644 --- a/checkpoint/utils.go +++ b/checkpoint/utils.go @@ -153,3 +153,38 @@ func isArgDefault(v string, d 
*schema.ResourceData, arg string, defaultVal strin isDefault := v == defaultVal && ok return v != defaultVal || isDefault } + +func resolveListOfIdentifiers(fieldName string, jsonResponse interface{}, d *schema.ResourceData) []string { + res := make([]string, 0) + key := "name" // by default we use name as object identifier + + if v, ok := d.GetOk("fields_with_uid_identifier"); ok { + fieldsSupportUidList := v.(*schema.Set).List() + if len(fieldsSupportUidList) > 0 { + for _, field := range fieldsSupportUidList { + if field == fieldName { + key = "uid" + break + } + } + } + } + + if arr, ok := jsonResponse.([]interface{}); ok { + if len(arr) > 0 { + for _, obj := range arr { + res = append(res, obj.(map[string]interface{})[key].(string)) + } + } + } else { + if obj, ok := jsonResponse.(map[string]interface{}); ok { + res = append(res, obj[key].(string)) + } + } + + return res +} + +func resolveObjectIdentifier(fieldName string, jsonResponse interface{}, d *schema.ResourceData) string { + return resolveListOfIdentifiers(fieldName, jsonResponse, d)[0] +} diff --git a/website/docs/d/checkpoint_management_data_access_rule.html.markdown b/website/docs/d/checkpoint_management_data_access_rule.html.markdown index d817c0d3..ab320d38 100644 --- a/website/docs/d/checkpoint_management_data_access_rule.html.markdown +++ b/website/docs/d/checkpoint_management_data_access_rule.html.markdown @@ -13,24 +13,28 @@ Use this data source to get information on an existing Check Point Access Rule. 
```hcl resource "checkpoint_management_access_rule" "access_rule" { - name = "My Rule" - layer = "Network" - position = { top = "top" } - source = ["Any"] + name = "My Rule" + layer = "Network" + position = { top = "top" } + source = ["Any"] destination = ["Any"] - service = ["Any"] - track = { - accounting = false - alert = "none" + service = ["Any"] + content = ["Any"] + time = ["Any"] + install_on = ["Policy Targets"] + track = { + type = "Log" + accounting = false + alert = "none" enable_firewall_session = false - per_connection = false - per_session = false - type = "None" + per_connection = true + per_session = false } + custom_fields = {} } data "checkpoint_management_data_access_rule" "data_access_rule" { - name = "${checkpoint_management_access_rule.access_rule.name}" + name = "${checkpoint_management_access_rule.access_rule.name}" layer = "${checkpoint_management_access_rule.access_rule.layer}" } ``` @@ -57,11 +61,12 @@ The following arguments are supported: * `service_negate` - True if negate is set for service. * `source` - Collection of Network objects identified by the name or UID. * `source_negate` - True if negate is set for source. -* `time` - List of time objects. For example: \"Weekend\", \"Off-Work\", \"Every-Day\". +* `time` - List of time objects. For example: "Weekend", "Off-Work", "Every-Day". * `track` - Track Settings. Track Settings blocks are documented below. * `user_check` - User check settings. User check settings blocks are documented below. -* `vpn` - Communities or Directional. +* `vpn` - VPN community identified by name or UID or "Any" or "All_GwToGw". * `comments` - Comments string. +* `fields_with_uid_identifier` - (Optional) List of resource fields that will use object UIDs as object identifiers. Default is object name. 
`action_settings` supports the following: diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 857826e8..61dfe442 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -15,21 +15,23 @@ next-generation firewalls and automate routine Security Management configuration configuration errors. With the Check Point provider, DevOps teams can automate their security and transform it into DevSecOps workflows. -## Examples usage -## Terraform 0.12 and earlier: +## Examples of usage +To use Check Point provider, copy and paste this code into your Terraform configuration, update provider configuration and run `terraform init`. +## Terraform 0.12 and earlier: ```hcl -# Configure the Check Point Provider +# Configure Check Point Provider for Management API provider "checkpoint" { server = "192.0.2.1" username = "aa" password = "aaaa" context = "web_api" + session_name = "Terraform Session" } # Create network object resource "checkpoint_management_network" "network" { - name = "network" + name = "My network" subnet4 = "192.0.2.0" mask_length4 = "24" # ... @@ -40,23 +42,32 @@ resource "checkpoint_management_network" "network" { terraform { required_providers { checkpoint = { - source = "checkpointsw/checkpoint" - version = "~> 1.6.0" + source = "CheckPointSW/checkpoint" + version = "X.Y.Z" } } } -# Configure the Check Point Provider +# Configure Check Point Provider for Management API provider "checkpoint" { server = "192.0.2.1" username = "aa" password = "aaaa" context = "web_api" + session_name = "Terraform Session" +} + +# Create network object +resource "checkpoint_management_network" "network" { + name = "My network" + subnet4 = "192.0.2.0" + mask_length4 = "24" + # ... 
} ``` ```hcl -# Configure the Check Point Provider for GAIA API +# Configure Check Point Provider for GAIA API provider "checkpoint" { server = "192.0.2.1" username = "gaia_user" @@ -66,7 +77,7 @@ provider "checkpoint" { # Set machine hostname resource "checkpoint_hostname" "hostname" { - name = "terrahost" + name = "terraform_host" } ``` @@ -80,15 +91,13 @@ The following arguments are supported: the `CHECKPOINT_USERNAME` environment variable. * `password` - (Optional) Check Point Management admin password. It must be provided, but can also be defined via the `CHECKPOINT_PASSWORD` environment variable. -* `api_key` - (Optional) Check Point Management admin api key. This can also be defined via +* `api_key` - (Optional) Check Point Management admin API key. It must be provided, but can also be defined via the `CHECKPOINT_API_KEY` environment variable. -* `context` - (Optional) Check Point access context - `web_api` or `gaia_api`. This can also be defined via - the `CHECKPOINT_CONTEXT` environment variable. Default value is `web_api`. * `domain` - (Optional) Login to specific domain. Domain can be identified by name or UID. This can also be defined via the `CHECKPOINT_DOMAIN` environment variable. -* `timeout` - (Optional) Timeout in seconds for the Go SDK to complete a transaction. This can also be defined via - the `CHECKPOINT_TIMEOUT` environment variable. Default value is `120` seconds. -* `port` - (Optional) Port used for connection to the API server. This can also be defined via the `CHECKPOINT_PORT` +* `context` - (Optional) Check Point access context - `web_api` or `gaia_api`. This can also be defined via + the `CHECKPOINT_CONTEXT` environment variable. Default value is `web_api`. +* `port` - (Optional) Port used for connection with the API server. This can also be defined via the `CHECKPOINT_PORT` environment variable. Default value is `443`. * `proxy_host` - (Optional) Proxy host used for proxy connections. 
This can also be defined via the `CHECKPOINT_PROXY_HOST` environment variable. @@ -101,13 +110,15 @@ The following arguments are supported: the `CHECKPOINT_SESSION_FILE_NAME` environment variable. default value is `sid.json`. * `session_timeout` - (Optional) Timeout in seconds for the session established in Check Point. This can also be defined via the `CHECKPOINT_SESSION_TIMEOUT` environment variable. The default for the value is `600`. The timeout can be `10` - `3600`. +* `timeout` - (Optional) Timeout in seconds for the Go SDK to complete a transaction. This can also be defined via + the `CHECKPOINT_TIMEOUT` environment variable. Default value is `120` seconds. * `cloud_mgmt_id` - (Optional) Smart-1 Cloud management UID. This can also be defined via the `CHECKPOINT_CLOUD_MGMT_ID` environment variable. * `auto_publish_batch_size` - (Optional) Number of batch size to automatically run publish. This can also be defined via the `CHECKPOINT_AUTO_PUBLISH_BATCH_SIZE` environment variable. ## Authentication -The Check Point provider offers providing credentials for authentication. The following methods are supported: +Check Point Provider offers providing credentials for authentication. 
The following methods are supported: - Static credentials - Environment variables @@ -121,32 +132,32 @@ provider "checkpoint" { server = "192.0.2.1" username = "aa" password = "aaaa" - context = "web_api" domain = "Domain Name" + context = "web_api" } ``` -Usage with api key: + +Usage with API key: ```hcl provider "checkpoint" { server = "192.0.2.1" api_key = "tBdloE9eOYzzSQicNxS7mA==" - context = "web_api" domain = "Domain Name" + context = "web_api" } ``` -Smart-1 Cloud: +Usage for Smart-1 Cloud: ```hcl provider "checkpoint" { server = "chkp-vmnc6s4y.maas.checkpoint.com" api_key = "tBdloE9eOYzzSQicNxS7mA==" - context = "web_api" cloud_mgmt_id = "de9a9b08-c7c7-436e-a64a-a54136301701" + context = "web_api" } ``` Or for GAIA API: - ```hcl provider "checkpoint" { server = "192.0.2.1" @@ -203,12 +214,12 @@ $ export CHECKPOINT_AUTO_PUBLISH_BATCH_SIZE=100 Then configure the Check Point Provider as following: ```hcl -# Configure the Check Point Provider +# Configure Check Point Provider via environment variables provider "checkpoint" {} # Create network object resource "checkpoint_management_network" "network" { - name = "network" + name = "My network" subnet4 = "192.0.2.0" mask_length4 = "24" # ... @@ -229,25 +240,25 @@ $ export CHECKPOINT_PORT=443 Then configure the Check Point Provider as following: ```hcl -# Configure the Check Point Provider +# Configure Check Point Provider via environment variables provider "checkpoint" {} # Set machine hostname resource "checkpoint_hostname" "hostname" { - name = "terrahost" + name = "terraform_host" } ``` -## Post Apply/Destroy commands +## Post Apply / Destroy scripts As of right now, Terraform does not provide native support for publish and install-policy, so both of them and more post apply actions are handled out-of-band. -In order to use post Apply/Destroy commands, the authentication method must be via environment variables. 
+In order to use post Apply / Destroy commands, the authentication method must be via environment variables. ### Publish -Please use the following for publish: +Please use the following script for Publish: ```bash $ cd $GOPATH/src/github.com/terraform-providers/terraform-provider-checkpoint/commands/publish @@ -255,15 +266,17 @@ $ go build publish.go $ mv publish $GOPATH/src/github.com/terraform-providers/terraform-provider-checkpoint $ terraform apply && publish ``` + +Another option is to use `auto_publish_batch_size` provider argument which automatically runs publish. + ### Install Policy The following arguments are supported: * `policy-package` - (Required) The name of the Policy Package to be installed. -* `target` - (Required) On what targets to execute this command. Targets may be identified by their name, or object - unique identifier. Multiple targets can be added. +* `target` - (Required) On what targets to execute this command. Targets may be identified by their name or object unique identifier. Multiple targets can be added. 
-Please use the following for install policy: +Please use the following script for Install Policy: ```bash $ cd $GOPATH/src/github.com/terraform-providers/terraform-provider-checkpoint/commands/install_policy @@ -274,7 +287,7 @@ $ terraform apply && install_policy -policy-package -target Date: Mon, 5 Aug 2024 12:00:15 +0300 Subject: [PATCH 2/5] Add support to VPN communities and directional --- ...ource_checkpoint_management_access_rule.go | 64 ++++++++++++- checkpoint/provider.go | 2 +- ...ource_checkpoint_management_access_rule.go | 93 ++++++++++++++++++- ..._management_data_access_rule.html.markdown | 9 +- ...point_management_access_rule.html.markdown | 17 +++- 5 files changed, 174 insertions(+), 11 deletions(-) diff --git a/checkpoint/data_source_checkpoint_management_access_rule.go b/checkpoint/data_source_checkpoint_management_access_rule.go index a5dcde3f..8b09d364 100644 --- a/checkpoint/data_source_checkpoint_management_access_rule.go +++ b/checkpoint/data_source_checkpoint_management_access_rule.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "log" "strconv" + "strings" ) func dataSourceManagementAccessRule() *schema.Resource { @@ -246,7 +247,34 @@ func dataSourceManagementAccessRule() *schema.Resource { "vpn": { Type: schema.TypeString, Computed: true, - Description: "Communities or Directional.", + Description: "Valid values \"Any\", \"All_GwToGw\" or VPN community name", + }, + "vpn_communities": { + Type: schema.TypeSet, + Computed: true, + Description: "VPN communities (used for multiple VPNs, otherwise, use \"vpn\" field)", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vpn_directional": { + Type: schema.TypeList, + Computed: true, + Description: "VPN directional", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeString, + Computed: true, + Description: "From VPN community", + }, + "to": { + Type: schema.TypeString, + Computed: true, + Description: 
"To VPN community", + }, + }, + }, }, "comments": { Type: schema.TypeString, @@ -483,8 +511,38 @@ func dataSourceManagementAccessRuleRead(d *schema.ResourceData, m interface{}) e } if v := accessRule["vpn"]; v != nil { - vpnId := resolveObjectIdentifier("vpn", v.([]interface{})[0], d) - _ = d.Set("vpn", vpnId) + vpnList := v.([]interface{}) + if len(vpnList) > 0 { + vpnType := vpnList[0].(map[string]interface{})["type"].(string) + if len(vpnList) == 1 && vpnType != "VpnDirectionalElement" { // BC + vpnId := resolveObjectIdentifier("vpn", v.([]interface{})[0], d) + _ = d.Set("vpn", vpnId) + _ = d.Set("vpn_communities", nil) + _ = d.Set("vpn_directional", nil) + } else if vpnType != "VpnDirectionalElement" { + vpnIds := resolveListOfIdentifiers("vpn", vpnList, d) + _ = d.Set("vpn_communities", vpnIds) + _ = d.Set("vpn", nil) + _ = d.Set("vpn_directional", nil) + } else if vpnType == "VpnDirectionalElement" { + var vpnDirectionalListState []map[string]interface{} + for i := range vpnList { + vpnDirectionalObj := vpnList[i].(map[string]interface{}) + if v, _ := vpnDirectionalObj["name"]; v != nil { + vpnDirectionalNames := strings.Split(v.(string), "->") + vpnDirectionalState := make(map[string]interface{}) + vpnDirectionalState["from"] = vpnDirectionalNames[0] + vpnDirectionalState["to"] = vpnDirectionalNames[1] + vpnDirectionalListState = append(vpnDirectionalListState, vpnDirectionalState) + } + } + _ = d.Set("vpn_directional", vpnDirectionalListState) + _ = d.Set("vpn_communities", nil) + _ = d.Set("vpn", nil) + } else { + return fmt.Errorf("Cannot read invalid VPN type [" + vpnType + "]") + } + } } if v := accessRule["comments"]; v != nil { diff --git a/checkpoint/provider.go b/checkpoint/provider.go index 7fb7f377..8eedc1cf 100644 --- a/checkpoint/provider.go +++ b/checkpoint/provider.go @@ -539,7 +539,7 @@ func login(client *checkpoint.ApiClient, username string, pwd string, apiKey str if err != nil { localRequestsError := "invalid character '<' looking for 
beginning of value" if strings.Contains(err.Error(), localRequestsError) { - return Session{}, fmt.Errorf("login failure: API server needs to be configured to accept requests from all IP addresses") + return Session{}, fmt.Errorf("Login to management server failed: API server needs to be configured to accept requests from all IP addresses") } return Session{}, err } diff --git a/checkpoint/resource_checkpoint_management_access_rule.go b/checkpoint/resource_checkpoint_management_access_rule.go index 9f3194f7..5eebe375 100644 --- a/checkpoint/resource_checkpoint_management_access_rule.go +++ b/checkpoint/resource_checkpoint_management_access_rule.go @@ -292,8 +292,34 @@ func resourceManagementAccessRule() *schema.Resource { "vpn": &schema.Schema{ Type: schema.TypeString, Optional: true, - Description: "Communities or Directional.", - Default: "Any", + Description: "Valid values \"Any\", \"All_GwToGw\" or VPN community name", + }, + "vpn_communities": { + Type: schema.TypeSet, + Optional: true, + Description: "VPN communities (used for multiple VPNs, otherwise, use \"vpn\" field)", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "vpn_directional": { + Type: schema.TypeList, + Optional: true, + Description: "VPN directional", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeString, + Optional: true, + Description: "From VPN community", + }, + "to": { + Type: schema.TypeString, + Optional: true, + Description: "To VPN community", + }, + }, + }, }, "ignore_warnings": { Type: schema.TypeBool, @@ -490,6 +516,12 @@ func createManagementAccessRule(d *schema.ResourceData, m interface{}) error { accessRule["user-check"] = userCheckPayload } } + if v, ok := d.GetOk("vpn_directional"); ok { + accessRule["vpn"] = v + } + if v, ok := d.GetOk("vpn_communities"); ok { + accessRule["vpn"] = v.(*schema.Set).List() + } if v, ok := d.GetOk("vpn"); ok { accessRule["vpn"] = v.(string) } @@ -747,8 +779,44 @@ func 
readManagementAccessRule(d *schema.ResourceData, m interface{}) error { } if v := accessRule["vpn"]; v != nil { - vpnId := resolveObjectIdentifier("vpn", v.([]interface{})[0], d) - _ = d.Set("vpn", vpnId) + vpnList := v.([]interface{}) + if len(vpnList) > 0 { + vpnType := vpnList[0].(map[string]interface{})["type"].(string) + if len(vpnList) == 1 && vpnType != "VpnDirectionalElement" { // BC + vpnId := resolveObjectIdentifier("vpn", v.([]interface{})[0], d) + _, vpnCommunitiesUsed := d.GetOk("vpn_communities") + if vpnCommunitiesUsed { + _ = d.Set("vpn", nil) + _ = d.Set("vpn_communities", []interface{}{vpnId}) + } else { + _ = d.Set("vpn", vpnId) + _ = d.Set("vpn_communities", nil) + } + _ = d.Set("vpn_directional", nil) + } else if vpnType != "VpnDirectionalElement" { + vpnIds := resolveListOfIdentifiers("vpn", vpnList, d) + _ = d.Set("vpn_communities", vpnIds) + _ = d.Set("vpn", nil) + _ = d.Set("vpn_directional", nil) + } else if vpnType == "VpnDirectionalElement" { + var vpnDirectionalListState []map[string]interface{} + for i := range vpnList { + vpnDirectionalObj := vpnList[i].(map[string]interface{}) + if v, _ := vpnDirectionalObj["name"]; v != nil { + vpnDirectionalNames := strings.Split(v.(string), "->") + vpnDirectionalState := make(map[string]interface{}) + vpnDirectionalState["from"] = vpnDirectionalNames[0] + vpnDirectionalState["to"] = vpnDirectionalNames[1] + vpnDirectionalListState = append(vpnDirectionalListState, vpnDirectionalState) + } + } + _ = d.Set("vpn_directional", vpnDirectionalListState) + _ = d.Set("vpn_communities", nil) + _ = d.Set("vpn", nil) + } else { + return fmt.Errorf("Cannot read invalid VPN type [" + vpnType + "]") + } + } } if v := accessRule["comments"]; v != nil { @@ -1020,13 +1088,28 @@ func updateManagementAccessRule(d *schema.ResourceData, m interface{}) error { } } + if d.HasChange("vpn_directional") { + if v, ok := d.GetOk("vpn_directional"); ok { + accessRule["vpn"] = v + } + } + + if d.HasChange("vpn_communities") { 
+ if v, ok := d.GetOk("vpn_communities"); ok { + accessRule["vpn"] = v.(*schema.Set).List() + } + } + if d.HasChange("vpn") { - accessRule["vpn"] = d.Get("vpn") + if v, ok := d.GetOk("vpn"); ok { + accessRule["vpn"] = v + } } if v, ok := d.GetOk("ignore_errors"); ok { accessRule["ignore-errors"] = v.(bool) } + if v, ok := d.GetOk("ignore_warnings"); ok { accessRule["ignore-warnings"] = v.(bool) } diff --git a/website/docs/d/checkpoint_management_data_access_rule.html.markdown b/website/docs/d/checkpoint_management_data_access_rule.html.markdown index ab320d38..86b0ffe3 100644 --- a/website/docs/d/checkpoint_management_data_access_rule.html.markdown +++ b/website/docs/d/checkpoint_management_data_access_rule.html.markdown @@ -31,6 +31,7 @@ resource "checkpoint_management_access_rule" "access_rule" { per_session = false } custom_fields = {} + vpn = "Any" } data "checkpoint_management_data_access_rule" "data_access_rule" { @@ -64,7 +65,9 @@ The following arguments are supported: * `time` - List of time objects. For example: "Weekend", "Off-Work", "Every-Day". * `track` - Track Settings. Track Settings blocks are documented below. * `user_check` - User check settings. User check settings blocks are documented below. -* `vpn` - VPN community identified by name or UID or "Any" or "All_GwToGw". +* `vpn` - VPN community identified by name or "Any" or "All_GwToGw". +* `vpn_communities` - Collection of VPN communities identified by name. +* `vpn_directional` - Collection of VPN directional. VPN directional block documented below. * `comments` - Comments string. * `fields_with_uid_identifier` - (Optional) List of resource fields that will use object UIDs as object identifiers. Default is object name. @@ -100,6 +103,10 @@ The following arguments are supported: * `every` * `unit` +`vpn_directional` supports the following: + +* `from` - From VPN community. +* `to` - To VPN community. 
diff --git a/website/docs/r/checkpoint_management_access_rule.html.markdown b/website/docs/r/checkpoint_management_access_rule.html.markdown index 0865d2f7..b3dbff77 100644 --- a/website/docs/r/checkpoint_management_access_rule.html.markdown +++ b/website/docs/r/checkpoint_management_access_rule.html.markdown @@ -34,6 +34,7 @@ resource "checkpoint_management_access_rule" "rule1" { } action_settings = {} custom_fields = {} + vpn = "Any" } resource "checkpoint_management_access_rule" "rule2" { @@ -58,6 +59,7 @@ resource "checkpoint_management_access_rule" "rule2" { } action_settings = {} custom_fields = {} + vpn = "All_GwToGw" } resource "checkpoint_management_access_rule" "rule3" { @@ -85,6 +87,7 @@ resource "checkpoint_management_access_rule" "rule3" { per_session = false } custom_fields = {} + vpn_communities = ["StarCommunity", "MeshedCommunity"] } resource "checkpoint_management_access_rule" "rule4" { @@ -108,6 +111,10 @@ resource "checkpoint_management_access_rule" "rule4" { } action_settings = {} custom_fields = {} + vpn_directional { + from = "StarVpn" + to = "MeshedCommunity" + } } resource "checkpoint_management_access_rule" "rule5" { @@ -133,6 +140,7 @@ resource "checkpoint_management_access_rule" "rule5" { per_session = false } custom_fields = {} + vpn = "Any" } ``` @@ -161,7 +169,9 @@ The following arguments are supported: * `time` - (Optional) List of time objects. For example: "Weekend", "Off-Work", "Every-Day". * `track` - (Optional) Track Settings. Track Settings blocks are documented below. * `user_check` - (Optional) User check settings. User check settings blocks are documented below. -* `vpn` - (Optional) VPN community identified by name or UID or "Any" or "All_GwToGw". +* `vpn` - (Optional) VPN community identified by name or "Any" or "All_GwToGw". +* `vpn_communities` - (Optional) Collection of VPN communities identified by name. +* `vpn_directional` - (Optional) Collection of VPN directional. VPN directional block documented below. 
* `ignore_warnings` - (Optional) Apply changes ignoring warnings. * `ignore_errors` - (Optional) Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored. * `comments` - (Optional) Comments string. @@ -206,6 +216,11 @@ The following arguments are supported: * `every` - (Optional) N/A. * `unit` - (Optional) N/A. +`vpn_directional` supports the following: + +* `from` - (Optional) From VPN community. +* `to` - (Optional) To VPN community. + ## Import `checkpoint_management_access_rule` can be imported by using the following format: LAYER_NAME;RULE_UID From 13d40e7dd1a0fb1a271695ac2706540a1deedcd3 Mon Sep 17 00:00:00 2001 From: chkp-royl <51701986+chkp-royl@users.noreply.github.com> Date: Mon, 5 Aug 2024 12:30:25 +0300 Subject: [PATCH 3/5] Update resource_checkpoint_management_access_rule.go --- checkpoint/resource_checkpoint_management_access_rule.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/checkpoint/resource_checkpoint_management_access_rule.go b/checkpoint/resource_checkpoint_management_access_rule.go index 3b3fa61b..fb38ec67 100644 --- a/checkpoint/resource_checkpoint_management_access_rule.go +++ b/checkpoint/resource_checkpoint_management_access_rule.go @@ -298,7 +298,7 @@ func resourceManagementAccessRule() *schema.Resource { "vpn_communities": { Type: schema.TypeSet, Optional: true, - Description: "VPN communities (used for multiple VPNs, otherwise, use \"vpn\" field)", + Description: "Collection of VPN communities identified by name", Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -306,7 +306,7 @@ func resourceManagementAccessRule() *schema.Resource { "vpn_directional": { Type: schema.TypeList, Optional: true, - Description: "VPN directional", + Description: "Collection of VPN directional", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "from": { From 2635041f6eee5561677347eca8b863e87de97ba8 Mon Sep 17 00:00:00 2001 From: 
chkp-royl <51701986+chkp-royl@users.noreply.github.com> Date: Mon, 5 Aug 2024 12:34:36 +0300 Subject: [PATCH 4/5] Update data_source_checkpoint_management_access_rule.go --- checkpoint/data_source_checkpoint_management_access_rule.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/checkpoint/data_source_checkpoint_management_access_rule.go b/checkpoint/data_source_checkpoint_management_access_rule.go index 8b09d364..ea93fd7b 100644 --- a/checkpoint/data_source_checkpoint_management_access_rule.go +++ b/checkpoint/data_source_checkpoint_management_access_rule.go @@ -252,7 +252,7 @@ func dataSourceManagementAccessRule() *schema.Resource { "vpn_communities": { Type: schema.TypeSet, Computed: true, - Description: "VPN communities (used for multiple VPNs, otherwise, use \"vpn\" field)", + Description: "Collection of VPN communities identified by name", Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -260,7 +260,7 @@ func dataSourceManagementAccessRule() *schema.Resource { "vpn_directional": { Type: schema.TypeList, Computed: true, - Description: "VPN directional", + Description: "Collection of VPN directional", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "from": { From 01a5f60851108ac7266fbf29d8af1342793e4ff6 Mon Sep 17 00:00:00 2001 From: chkp-royl Date: Sun, 29 Sep 2024 14:03:53 +0300 Subject: [PATCH 5/5] Use GO SDK v1.7.2 --- go.mod | 2 +- go.sum | 4 +- .../cp-mgmt-api-go-sdk/APIFiles/APIClient.go | 97 ++++++---- .../APIFiles/APIClientArgs.go | 44 +++-- vendor/github.com/hashicorp/hcl/.gitignore | 18 +- vendor/github.com/hashicorp/hcl/Makefile | 36 ++-- .../klauspost/compress/fse/README.md | 156 ++++++++-------- .../klauspost/compress/huff0/README.md | 174 +++++++++--------- .../mitchellh/copystructure/README.md | 42 ++--- vendor/github.com/spf13/afero/.travis.yml | 42 ++--- vendor/modules.txt | 2 +- 11 files changed, 316 insertions(+), 301 deletions(-) diff --git a/go.mod b/go.mod index b4949930..3675ea43 100644 --- a/go.mod 
+++ b/go.mod @@ -1,7 +1,7 @@ module github.com/CheckPointSW/terraform-provider-checkpoint require ( - github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.1 + github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.2 github.com/hashicorp/terraform-plugin-sdk v1.17.0 ) diff --git a/go.sum b/go.sum index 3e6a8bbb..522c57de 100644 --- a/go.sum +++ b/go.sum @@ -35,8 +35,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.1 h1:wrkITUkWsguTps+5h9CzSzYM5GdFBLekjh3dr8IHjs8= -github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.1/go.mod h1:3NU+v6M7/Er4fECsNh7SlDwdsCYImVVqp14A01xBmU4= +github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.2 h1:D6nyYm1N1IxjL9ZgSp7p4UAlIAAGZVmMRs7XDyRMGIo= +github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.2/go.mod h1:3NU+v6M7/Er4fECsNh7SlDwdsCYImVVqp14A01xBmU4= github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= diff --git a/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClient.go b/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClient.go index 4a8ae82e..7b68307a 100644 --- a/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClient.go +++ b/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClient.go @@ -31,17 +31,17 @@ import ( ) const ( - InProgress string = "in progress" - DefaultPort int = 443 - Limit int = 50 - Filename string = "fingerprints.json" - TimeOut time.Duration = time.Second * 10 - SleepTime 
time.Duration = time.Second * 2 - GaiaContext string = "gaia_api" - WebContext string = "web_api" - DefaultProxyPort = -1 - DefaultProxyHost = "" - AutoPublishBatchSize int = 100 + InProgress string = "in progress" + DefaultPort int = 443 + Limit int = 50 + Filename string = "fingerprints.json" + TimeOut time.Duration = time.Second * 10 + SleepTime time.Duration = time.Second * 2 + GaiaContext string = "gaia_api" + WebContext string = "web_api" + DefaultProxyPort = -1 + DefaultProxyHost = "" + AutoPublishBatchSize int = 100 ) // Check Point API Client (Management/GAIA) @@ -66,15 +66,15 @@ type ApiClient struct { userAgent string cloudMgmtId string autoPublishBatchSize int - activeCallsLock sync.Mutex - autoPublishLock sync.Mutex - totalCallsLock sync.Mutex + activeCallsLock sync.Mutex + autoPublishLock sync.Mutex + totalCallsLock sync.Mutex duringPublish bool - activeCallsCtr int - totalCallsCtr int + activeCallsCtr int + totalCallsCtr int } -// Api Client constructor +// ApiClient constructor // Input ApiClientArgs // Returns new client instance func APIClient(apiCA ApiClientArgs) *ApiClient { @@ -212,7 +212,7 @@ func (c *ApiClient) DisableAutoPublish() { c.totalCallsCtr = 0 } -// Deprecated: Do not use. +// Deprecated: Do not use. Use ApiLogin instead func (c *ApiClient) Login(username string, password string, continueLastSession bool, domain string, readOnly bool, payload string) (APIResponse, error) { credentials := map[string]interface{}{ "user": username, @@ -221,7 +221,7 @@ func (c *ApiClient) Login(username string, password string, continueLastSession return c.commonLoginLogic(credentials, continueLastSession, domain, readOnly, make(map[string]interface{})) } -// Deprecated: Do not use. +// Deprecated: Do not use. 
Use ApiLoginWithApiKey instead func (c *ApiClient) LoginWithApiKey(apiKey string, continueLastSession bool, domain string, readOnly bool, payload string) (APIResponse, error) { credentials := map[string]interface{}{ "api-key": apiKey, @@ -230,7 +230,7 @@ func (c *ApiClient) LoginWithApiKey(apiKey string, continueLastSession bool, dom } /* -Performs a 'login' API call to management server +Performs login API call to the management server using username and password username: Check Point admin name password: Check Point admin password @@ -240,7 +240,6 @@ read_only: [optional] Login with Read Only permissions. This parameter is not co payload: [optional] More settings for the login command returns: APIResponse, error side-effects: updates the class's uid and server variables - */ func (c *ApiClient) ApiLogin(username string, password string, continueLastSession bool, domain string, readOnly bool, payload map[string]interface{}) (APIResponse, error) { credentials := map[string]interface{}{ @@ -251,7 +250,7 @@ func (c *ApiClient) ApiLogin(username string, password string, continueLastSessi } /* -performs a 'login' API call to the management server +Performs login API call to the management server using api key api_key: Check Point api-key continue_last_session: [optional] It is possible to continue the last Check Point session @@ -309,15 +308,16 @@ command: the command is placed in the URL field payload: a JSON object (or a string representing a JSON object) with the command arguments sid: The Check Point session-id. when omitted use self.sid. waitForTask: determines the behavior when the API server responds with a "task-id". + by default, the function will periodically check the status of the task and will not return until the task is completed. when wait_for_task=False, it is up to the user to call the "show-task" API and check the status of the command. + useProxy: Determines if the user wants to use the proxy server and port provider. 
method: HTTP request method - POST by default return: APIResponse object side-effects: updates the class's uid and server variables - */ func (c *ApiClient) ApiCall(command string, payload map[string]interface{}, sid string, waitForTask bool, useProxy bool, method ...string) (APIResponse, error) { return c.apiCall(command, payload, sid, waitForTask, useProxy, false, method...) @@ -411,9 +411,9 @@ func (c *ApiClient) apiCall(command string, payload map[string]interface{}, sid if !internal && c.autoPublishBatchSize > 0 { waitToRun := true for waitToRun { - if c.totalCallsCtr + 1 <= c.autoPublishBatchSize && !c.duringPublish { + if c.totalCallsCtr+1 <= c.autoPublishBatchSize && !c.duringPublish { c.totalCallsLock.Lock() - if c.totalCallsCtr + 1 <= c.autoPublishBatchSize && !c.duringPublish { + if c.totalCallsCtr+1 <= c.autoPublishBatchSize && !c.duringPublish { c.totalCallsCtr++ waitToRun = false } @@ -519,9 +519,9 @@ func (c *ApiClient) apiCall(command string, payload map[string]interface{}, sid if !internal && c.autoPublishBatchSize > 0 { c.decreaseActiveCalls() - if c.totalCallsCtr > 0 && c.totalCallsCtr % c.autoPublishBatchSize == 0 && !c.duringPublish { + if c.totalCallsCtr > 0 && c.totalCallsCtr%c.autoPublishBatchSize == 0 && !c.duringPublish { c.autoPublishLock.Lock() - if c.totalCallsCtr > 0 && c.totalCallsCtr % c.autoPublishBatchSize == 0 && !c.duringPublish { + if c.totalCallsCtr > 0 && c.totalCallsCtr%c.autoPublishBatchSize == 0 && !c.duringPublish { c.duringPublish = true c.autoPublishLock.Unlock() for c.activeCallsCtr > 0 { @@ -531,16 +531,16 @@ func (c *ApiClient) apiCall(command string, payload map[string]interface{}, sid } // Going to publish fmt.Println("Start auto publish...") - publishRes, _ := c.apiCall("publish", map[string]interface{}{},c.GetSessionID(),true,c.IsProxyUsed(), true) + publishRes, _ := c.apiCall("publish", map[string]interface{}{}, c.GetSessionID(), true, c.IsProxyUsed(), true) if !publishRes.Success { fmt.Println("Auto publish 
failed. Message: " + publishRes.ErrorMsg) - }else{ + } else { fmt.Println("Auto publish finished successfully") } c.totalCallsCtr = 0 c.duringPublish = false - }else{ + } else { c.autoPublishLock.Unlock() } } @@ -549,7 +549,8 @@ func (c *ApiClient) apiCall(command string, payload map[string]interface{}, sid return res, nil } -/** +/* +* The APIs that return a list of objects are limited by the number of objects that they return. To get the full list of objects, there's a need to make repeated API calls each time using a different offset until all the objects are returned. @@ -558,19 +559,25 @@ note: this function calls gen_api_query and iterates over the generator until it then returns. command: name of API command. This command should be an API that returns an array of + objects (for example: show-hosts, show networks, ...) + details_level: query APIs always take a details-level argument. + possible values are "standard", "full", "uid" + container_key: name of the key that holds the objects in the JSON response (usually "objects"). include_container_key: If set to False the 'data' field of the APIResponse object + will be a list of the wanted objects. Otherwise, the date field of the APIResponse will be a dictionary in the following + format: { container_key: [ List of the wanted objects], "total": size of the list} payload: a JSON object (or a string representing a JSON object) with the command arguments return: if include-container-key is False: + an APIResponse object whose .data member contains a list of the objects requested: [ , , , ...] 
if include-container-key is True: an APIResponse object whose .data member contains a dict: { container_key: [...], "total": n } - */ func (c *ApiClient) ApiQuery(command string, detailsLevel string, containerKey string, includeContainerKey bool, payload map[string]interface{}) (APIResponse, error) { @@ -617,7 +624,9 @@ This is in contrast to normal API calls that return only a limited number of obj This function can be used to show progress when requesting many objects (i.e. "Received x/y objects.") command: name of API command. This command should be an API that returns an array of objects + (for example: show-hosts, show networks, ...) + details_level: query APIs always take a details-level argument. Possible values are "standard", "full", "uid" container_keys: the field in the .data dict that contains the objects payload: a JSON object (or a string representing a JSON object) with the command arguments @@ -712,7 +721,8 @@ func (c *ApiClient) genApiQuery(command string, detailsLevel string, containerKe return serverResponse } -/** +/* +* When the server needs to perform an API call that may take a long time (e.g. run-script, install-policy, publish), the server responds with a 'task-id'. Using the show-task API it is possible to check on the status of this task until its completion. 
@@ -759,10 +769,10 @@ func (c *ApiClient) waitForTask(taskId string) (APIResponse, error) { totalTasks := 0 for _, task := range taskResult.GetData()["tasks"].([]interface{}) { totalTasks++ - if task.(map[string]interface{})["status"].(string) != InProgress { + taskMap := task.(map[string]interface{}) + if taskMap["status"] != nil && taskMap["status"].(string) != InProgress { completedTasks++ } - } if completedTasks == totalTasks { @@ -778,7 +788,8 @@ func (c *ApiClient) waitForTask(taskId string) (APIResponse, error) { } -/** +/* +* The version of waitForTask function for the collection of tasks task_objects: A list of task objects @@ -823,7 +834,8 @@ func (c *ApiClient) waitForTasks(taskObjects []interface{}) APIResponse { } -/** +/* +* This method checks if one of the tasks failed and if so, changes the response status to be False task_result: api_response returned from "show-task" command @@ -846,7 +858,8 @@ func checkTasksStatus(taskResult *APIResponse) { @===================@ */ -/** +/* +* This function checks if the server's certificate is stored in the local fingerprints file. If the server's fingerprint is not found, an HTTPS connection is made to the server and the user is asked if he or she accepts the server's fingerprint. @@ -928,7 +941,8 @@ func (c *ApiClient) loadFingerprintFromFile() (string, error) { } -/** +/* +* This function takes the content of the file $FILENAME (which is a json file) and parses it's content to a map (from string to string) @@ -960,13 +974,16 @@ func (c *ApiClient) fpFileToMap() (map[string]string, error) { } -/** +/* +* store a server's fingerprint into a local file. server: the IP address/name of the Check Point management server. fingerprint: A SHA1 fingerprint of the server's certificate. filename: The file in which to store the certificates. The file will hold a JSON structure in which + the key is the server and the value is its fingerprint. + return: 'True' if everything went well. 
'False' if there was some kind of error storing the fingerprint. */ func (c *ApiClient) saveFingerprintToFile(server string, fingerprint string) error { diff --git a/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClientArgs.go b/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClientArgs.go index a03b73d7..555dca7b 100644 --- a/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClientArgs.go +++ b/vendor/github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles/APIClientArgs.go @@ -2,7 +2,7 @@ package api_go_sdk import "time" -// Api Client arguments to init a new instance +// ApiClient arguments to init a new instance type ApiClientArgs struct { Port int Fingerprint string @@ -16,16 +16,15 @@ type ApiClientArgs struct { AcceptServerCertificate bool DebugFile string Context string - Timeout time.Duration - Sleep time.Duration - UserAgent string + Timeout time.Duration + Sleep time.Duration + UserAgent string CloudMgmtId string - AutoPublishBatchSize int + AutoPublishBatchSize int } /* -Init a new APIClientArgs -Construct a new ApiClientArgs instance with the given parameters. 
+Init new APIClientArgs Port: the port that is being used Fingerprint: server's fingerprint @@ -42,26 +41,25 @@ Timeout: HTTP Client timeout value Sleep: Interval size in seconds of the task update UserAgent: User agent will be use in api call request header CloudMgmtId: Smart-1 Cloud management UID -AutoPublishBatchSize: Number of batch size for auto publish +AutoPublishBatchSize: Publish will run automatically on every 'X' number of api calls, set -1 (or any negative number) to disable the feature */ func APIClientArgs(port int, fingerprint string, sid string, server string, proxyHost string, proxyPort int, apiVersion string, ignoreServerCertificate bool, acceptServerCertificate bool, debugFile string, context string, timeout time.Duration, sleep time.Duration, userAgent string, cloudMgmtId string, autoPublishBatchSize int) ApiClientArgs { - return ApiClientArgs{ - Port: port, - Fingerprint: fingerprint, - Sid: sid, - Server: server, - ProxyHost: proxyHost, - ProxyPort: proxyPort, - ApiVersion: apiVersion, + Port: port, + Fingerprint: fingerprint, + Sid: sid, + Server: server, + ProxyHost: proxyHost, + ProxyPort: proxyPort, + ApiVersion: apiVersion, IgnoreServerCertificate: ignoreServerCertificate, AcceptServerCertificate: acceptServerCertificate, - DebugFile: debugFile, - Context: context, - Timeout: timeout, - Sleep: sleep, - UserAgent: userAgent, - CloudMgmtId: cloudMgmtId, - AutoPublishBatchSize: autoPublishBatchSize, + DebugFile: debugFile, + Context: context, + Timeout: timeout, + Sleep: sleep, + UserAgent: userAgent, + CloudMgmtId: cloudMgmtId, + AutoPublishBatchSize: autoPublishBatchSize, } } diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore index 822fa09f..15586a2b 100644 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -1,9 +1,9 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test +y.output + +# ignore intellij files +.idea 
+*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile index 9fafd501..84fd743f 100644 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -1,18 +1,18 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... + +updatedeps: + go get -u golang.org/x/tools/cmd/stringer + +.PHONY: default generate test updatedeps diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md index 27d8ed56..ea7324da 100644 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -1,79 +1,79 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. - -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. 
- -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. 
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. 
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. 
+ +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking changes will likely not be accepted. If in doubt open an issue before writing the PR. 
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md index ec4f9098..e12da4db 100644 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ b/vendor/github.com/klauspost/compress/huff0/README.md @@ -1,87 +1,87 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - - * Mar 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. 
- -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. 
- -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. +# Huff0 entropy compression + +This package provides Huff0 encoding and decoding as used in zstd. 
+ +[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), +a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU +(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) + +## News + + * Mar 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and +[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | +| `(error)` | An internal error occurred. 
|
+
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+This will initialize the decoding tables.
+You can supply the complete block to `ReadTable` and it will return the data part of the block
+which can be given to the decompressor.
+ +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md index f0fbd2e5..bcb8c8d2 100644 --- a/vendor/github.com/mitchellh/copystructure/README.md +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -1,21 +1,21 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. 
+# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml index 8fc1261c..0637db72 100644 --- a/vendor/github.com/spf13/afero/.travis.yml +++ b/vendor/github.com/spf13/afero/.travis.yml @@ -1,21 +1,21 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - +sudo: false +language: go + +go: + - 1.9 + - "1.10" + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build + - go test -race -v ./... + diff --git a/vendor/modules.txt b/vendor/modules.txt index a27a8da3..729533ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -8,7 +8,7 @@ cloud.google.com/go/internal/trace cloud.google.com/go/internal/version # cloud.google.com/go/storage v1.10.0 cloud.google.com/go/storage -# github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.1 +# github.com/CheckPointSW/cp-mgmt-api-go-sdk v1.7.2 github.com/CheckPointSW/cp-mgmt-api-go-sdk/APIFiles # github.com/Masterminds/goutils v1.1.0 github.com/Masterminds/goutils