diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java index 4576f62f84b9..eb96df0eaba9 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Compute.java @@ -16,7 +16,18 @@ package com.google.gcloud.compute; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Joiner; +import com.google.common.base.MoreObjects; +import com.google.common.collect.Sets; +import com.google.gcloud.Page; import com.google.gcloud.Service; +import com.google.gcloud.spi.ComputeRpc; + +import java.io.Serializable; +import java.util.Objects; +import java.util.Set; /** * An interface for Google Cloud Compute Engine. @@ -24,4 +35,858 @@ * @see Google Cloud Compute Engine */ public interface Compute extends Service { + + /** + * Fields of a Compute Engine DiskType resource. + * + * @see Disk + * Type Resource + */ + enum DiskTypeField { + CREATION_TIMESTAMP("creationTimestamp"), + DEFAULT_DISK_SIZE_GB("defaultDiskSizeGb"), + DESCRIPTION("description"), + ID("id"), + NAME("name"), + SELF_LINK("selfLink"), + VALID_DISK_SIZE("validDiskSize"), + ZONE("zone"), + DEPRECATED("deprecated"); + + private final String selector; + + DiskTypeField(String selector) { + this.selector = selector; + } + + public String selector() { + return selector; + } + + static String selector(DiskTypeField... fields) { + Set fieldStrings = Sets.newHashSetWithExpectedSize(fields.length + 1); + fieldStrings.add(SELF_LINK.selector()); + for (DiskTypeField field : fields) { + fieldStrings.add(field.selector()); + } + return Joiner.on(',').join(fieldStrings); + } + } + + /** + * Fields of a Compute Engine MachineType resource. + * + * @see + * Machine Type Resource + */ + enum MachineTypeField { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + GUEST_CPUS("guestCpus"), + ID("id"), + IMAGE_SPACE_GB("imageSpaceGb"), + MAXIMUM_PERSISTENT_DISKS("maximumPersistentDisks"), + MAXIMUM_PERSISTENT_DISKS_SIZE_GB("maximumPersistentDisksSizeGb"), + MEMORY_MB("memoryMb"), + NAME("name"), + SCRATCH_DISKS("scratchDisks"), + SELF_LINK("selfLink"), + ZONE("zone"), + DEPRECATED("deprecated"); + + private final String selector; + + MachineTypeField(String selector) { + this.selector = selector; + } + + public String selector() { + return selector; + } + + static String selector(MachineTypeField... fields) { + Set fieldStrings = Sets.newHashSetWithExpectedSize(fields.length + 1); + fieldStrings.add(SELF_LINK.selector()); + for (MachineTypeField field : fields) { + fieldStrings.add(field.selector()); + } + return Joiner.on(',').join(fieldStrings); + } + } + + /** + * Fields of a Compute Engine Region resource. + * + * @see + * Region Resource + */ + enum RegionField { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + ID("id"), + NAME("name"), + QUOTAS("quotas"), + SELF_LINK("selfLink"), + STATUS("status"), + ZONES("zones"), + DEPRECATED("deprecated"); + + private final String selector; + + RegionField(String selector) { + this.selector = selector; + } + + public String selector() { + return selector; + } + + static String selector(RegionField... 
fields) { + Set fieldStrings = Sets.newHashSetWithExpectedSize(fields.length + 1); + fieldStrings.add(SELF_LINK.selector()); + for (RegionField field : fields) { + fieldStrings.add(field.selector()); + } + return Joiner.on(',').join(fieldStrings); + } + } + + /** + * Fields of a Compute Engine Zone resource. + * + * @see Zone + * Resource + */ + enum ZoneField { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + ID("id"), + MAINTENANCE_WINDOWS("maintenanceWindows"), + NAME("name"), + REGION("region"), + SELF_LINK("selfLink"), + STATUS("status"), + DEPRECATED("deprecated"); + + private final String selector; + + ZoneField(String selector) { + this.selector = selector; + } + + public String selector() { + return selector; + } + + static String selector(ZoneField... fields) { + Set fieldStrings = Sets.newHashSetWithExpectedSize(fields.length + 1); + fieldStrings.add(SELF_LINK.selector()); + for (ZoneField field : fields) { + fieldStrings.add(field.selector()); + } + return Joiner.on(',').join(fieldStrings); + } + } + + /** + * Fields of a Compute Engine License resource. + * + * @see License + * Resource + */ + enum LicenseField { + CHARGES_USE_FEE("chargesUseFee"), + NAME("name"), + SELF_LINK("selfLink"); + + private final String selector; + + LicenseField(String selector) { + this.selector = selector; + } + + public String selector() { + return selector; + } + + static String selector(LicenseField... fields) { + Set fieldStrings = Sets.newHashSetWithExpectedSize(fields.length + 1); + fieldStrings.add(SELF_LINK.selector()); + for (LicenseField field : fields) { + fieldStrings.add(field.selector()); + } + return Joiner.on(',').join(fieldStrings); + } + } + + /** + * Base class for list filters. + */ + abstract class ListFilter implements Serializable { + + private static final long serialVersionUID = -238638392811165127L; + + private final String field; + private final ComparisonOperator operator; + private final Object value; + + enum ComparisonOperator { + /** + * Defines an equality filter. + */ + EQ, + + /** + * Defines an inequality filter. + */ + NE + } + + ListFilter(String field, ComparisonOperator operator, Object value) { + this.field = field; + this.operator = operator; + this.value = value; + } + + @Override + public int hashCode() { + return Objects.hash(field, operator, value); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof ListFilter && toPb().equals(((ListFilter) obj).toPb()); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("field", field) + .add("operator", operator) + .add("value", value) + .toString(); + } + + String toPb() { + return field + ' ' + operator.name().toLowerCase() + ' ' + value.toString(); + } + } + + /** + * Class for filtering disk type lists. + */ + class DiskTypeFilter extends ListFilter { + + private static final long serialVersionUID = 4847837203592234453L; + + DiskTypeFilter(DiskTypeField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. 
+ * + * @see RE2 + */ + public static DiskTypeFilter equals(DiskTypeField field, String value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskTypeFilter notEquals(DiskTypeField field, String value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equality filter for the given field and long value. + */ + public static DiskTypeFilter equals(DiskTypeField field, long value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns an inequality filter for the given field and long value. + */ + public static DiskTypeFilter notEquals(DiskTypeField field, long value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering machine type lists. + */ + class MachineTypeFilter extends ListFilter { + + private static final long serialVersionUID = 7346062041571853235L; + + MachineTypeFilter(MachineTypeField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static MachineTypeFilter equals(MachineTypeField field, String value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static MachineTypeFilter notEquals(MachineTypeField field, String value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equality filter for the given field and long value. + */ + public static MachineTypeFilter equals(MachineTypeField field, long value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns an inequality filter for the given field and long value. + */ + public static MachineTypeFilter notEquals(MachineTypeField field, long value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering region lists. + */ + class RegionFilter extends ListFilter { + + private static final long serialVersionUID = 4464892812442567172L; + + RegionFilter(RegionField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static RegionFilter equals(RegionField field, String value) { + return new RegionFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns an equality filter for the given field and string value. 
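For illustration, a filter built from these factory methods serializes (via toPb) into the Compute API filter expression "field operator value", with the operator lowercased; the disk type name and size below are placeholder values in a minimal sketch:

    // String filters take an RE2 regex that must match the entire field;
    // these two serialize to "name eq pd-ssd" and "defaultDiskSizeGb ne 375".
    Compute.DiskTypeFilter byName =
        Compute.DiskTypeFilter.equals(Compute.DiskTypeField.NAME, "pd-ssd");
    Compute.DiskTypeFilter bySize =
        Compute.DiskTypeFilter.notEquals(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB, 375);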
For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static RegionFilter notEquals(RegionField field, String value) { + return new RegionFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + } + + /** + * Class for filtering zone lists. + */ + class ZoneFilter extends ListFilter { + + private static final long serialVersionUID = -3927428278548808737L; + + ZoneFilter(ZoneField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static ZoneFilter equals(ZoneField field, String value) { + return new ZoneFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns an equality filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static ZoneFilter notEquals(ZoneField field, String value) { + return new ZoneFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + } + + /** + * Class for specifying disk type get options. + */ + class DiskTypeOption extends Option { + + private static final long serialVersionUID = 7349162455602991554L; + + private DiskTypeOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the disk type's fields to be returned by the RPC call. If this + * option is not provided all disk type's fields are returned. {@code DiskTypeOption.fields} can + * be used to specify only the fields of interest. {@link DiskType#diskTypeId()} is always + * returned, even if not specified. + */ + public static DiskTypeOption fields(DiskTypeField... fields) { + return new DiskTypeOption(ComputeRpc.Option.FIELDS, DiskTypeField.selector(fields)); + } + } + + /** + * Class for specifying disk type list options. + */ + class DiskTypeListOption extends Option { + + private static final long serialVersionUID = 9051194230847610951L; + + private DiskTypeListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter to the disk types being listed. + */ + public static DiskTypeListOption filter(DiskTypeFilter filter) { + return new DiskTypeListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disk types to be returned. + */ + public static DiskTypeListOption maxResults(long maxResults) { + return new DiskTypeListOption(ComputeRpc.Option.MAX_RESULTS, maxResults); + } + + /** + * Returns an option to specify the page token from which to start listing disk types. + */ + public static DiskTypeListOption startPageToken(String pageToken) { + return new DiskTypeListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the disk type's fields to be returned by the RPC call. If this + * option is not provided all disk type's fields are returned. {@code DiskTypeListOption.fields} + * can be used to specify only the fields of interest. {@link DiskType#diskTypeId()} is always + * returned, even if not specified. 
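A minimal usage sketch for these list options, assuming a Compute instance built from ComputeOptions and the listDiskTypes method declared later in this interface; the project id, zone, and name regex are placeholders:

    Compute compute = ComputeOptions.builder().projectId("my-project").build().service();
    Page<DiskType> diskTypes = compute.listDiskTypes("us-central1-a",
        Compute.DiskTypeListOption.filter(
            Compute.DiskTypeFilter.equals(Compute.DiskTypeField.NAME, "pd-.*")),
        Compute.DiskTypeListOption.maxResults(50),
        Compute.DiskTypeListOption.fields(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB));
    for (DiskType diskType : diskTypes.values()) {
      // only defaultDiskSizeGb and the disk type id are requested from the service here
    }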
+ */ + public static DiskTypeListOption fields(DiskTypeField... fields) { + StringBuilder builder = new StringBuilder(); + builder.append("items(").append(DiskTypeField.selector(fields)).append("),nextPageToken"); + return new DiskTypeListOption(ComputeRpc.Option.FIELDS, builder.toString()); + } + } + + /** + * Class for specifying disk type aggregated list options. + */ + class DiskTypeAggregatedListOption extends Option { + + private static final long serialVersionUID = 7611137483018305170L; + + private DiskTypeAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter to the disk types being listed. + */ + public static DiskTypeAggregatedListOption filter(DiskTypeFilter filter) { + return new DiskTypeAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disk types to be returned. + */ + public static DiskTypeAggregatedListOption maxResults(long maxResults) { + return new DiskTypeAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, maxResults); + } + + /** + * Returns an option to specify the page token from which to start listing disk types. + */ + public static DiskTypeAggregatedListOption startPageToken(String pageToken) { + return new DiskTypeAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Class for specifying machine type get options. + */ + class MachineTypeOption extends Option { + + private static final long serialVersionUID = 7349162455602991554L; + + private MachineTypeOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the machine type's fields to be returned by the RPC call. If + * this option is not provided all machine type's fields are returned. + * {@code MachineTypeOption.fields} can be used to specify only the fields of interest. + * {@link MachineType#machineTypeId()} is always returned, even if not specified. + */ + public static MachineTypeOption fields(MachineTypeField... fields) { + return new MachineTypeOption(ComputeRpc.Option.FIELDS, MachineTypeField.selector(fields)); + } + } + + /** + * Class for specifying machine type list options. + */ + class MachineTypeListOption extends Option { + + private static final long serialVersionUID = -2974553049419897902L; + + private MachineTypeListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter to the machine types being listed. + */ + public static MachineTypeListOption filter(MachineTypeFilter filter) { + return new MachineTypeListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of machine types to be returned. + */ + public static MachineTypeListOption maxResults(long maxResults) { + return new MachineTypeListOption(ComputeRpc.Option.MAX_RESULTS, maxResults); + } + + /** + * Returns an option to specify the page token from which to start listing machine types. + */ + public static MachineTypeListOption startPageToken(String pageToken) { + return new MachineTypeListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the machine type's fields to be returned by the RPC call. If + * this option is not provided all machine type's fields are returned. + * {@code MachineTypeListOption.fields} can be used to specify only the fields of interest. 
+ * {@link MachineType#machineTypeId()} is always returned, even if not specified. + */ + public static MachineTypeListOption fields(MachineTypeField... fields) { + StringBuilder builder = new StringBuilder(); + builder.append("items(").append(MachineTypeField.selector(fields)).append("),nextPageToken"); + return new MachineTypeListOption(ComputeRpc.Option.FIELDS, builder.toString()); + } + } + + /** + * Class for specifying machine type aggregated list options. + */ + class MachineTypeAggregatedListOption extends Option { + + private static final long serialVersionUID = 8492257475500296057L; + + private MachineTypeAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter to the machine types being listed. + */ + public static MachineTypeAggregatedListOption filter(MachineTypeFilter filter) { + return new MachineTypeAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of machine types to be returned. + */ + public static MachineTypeAggregatedListOption maxResults(long maxResults) { + return new MachineTypeAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, maxResults); + } + + /** + * Returns an option to specify the page token from which to start listing machine types. + */ + public static MachineTypeAggregatedListOption startPageToken(String pageToken) { + return new MachineTypeAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Class for specifying region get options. + */ + class RegionOption extends Option { + + private static final long serialVersionUID = 2025084807788610826L; + + private RegionOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the region's fields to be returned by the RPC call. If this + * option is not provided all region's fields are returned. {@code RegionOption.fields} can be + * used to specify only the fields of interest. {@link Region#regionId()} is always + * returned, even if not specified. + */ + public static RegionOption fields(RegionField... fields) { + return new RegionOption(ComputeRpc.Option.FIELDS, RegionField.selector(fields)); + } + } + + /** + * Class for specifying region list options. + */ + class RegionListOption extends Option { + + private static final long serialVersionUID = 3348089279267170211L; + + private RegionListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter to the regions being listed. + */ + public static RegionListOption filter(RegionFilter filter) { + return new RegionListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of regions to be returned. + */ + public static RegionListOption maxResults(long maxResults) { + return new RegionListOption(ComputeRpc.Option.MAX_RESULTS, maxResults); + } + + /** + * Returns an option to specify the page token from which to start listing regions. + */ + public static RegionListOption startPageToken(String pageToken) { + return new RegionListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the region's fields to be returned by the RPC call. If this + * option is not provided all region's fields are returned. {@code RegionListOption.fields} can + * be used to specify only the fields of interest. 
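As a sketch of the aggregated variant (project id and machine type regex are placeholders), the same filter and paging options apply, but results span all zones of the project:

    Compute compute = ComputeOptions.builder().projectId("my-project").build().service();
    Page<MachineType> machineTypes = compute.listMachineTypes(
        Compute.MachineTypeAggregatedListOption.filter(
            Compute.MachineTypeFilter.equals(Compute.MachineTypeField.NAME, "n1-standard-.*")),
        Compute.MachineTypeAggregatedListOption.maxResults(100));
    for (MachineType machineType : machineTypes.values()) {
      // machineTypeId() carries the zone each aggregated result belongs to
    }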
{@link Region#regionId()} is always + * returned, even if not specified. + */ + public static RegionListOption fields(RegionField... fields) { + StringBuilder builder = new StringBuilder(); + builder.append("items(").append(RegionField.selector(fields)).append("),nextPageToken"); + return new RegionListOption(ComputeRpc.Option.FIELDS, builder.toString()); + } + } + + /** + * Class for specifying zone get options. + */ + class ZoneOption extends Option { + + private static final long serialVersionUID = -2968652076389846258L; + + private ZoneOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the zone's fields to be returned by the RPC call. If this option + * is not provided all zone's fields are returned. {@code ZoneOption.fields} can be used to + * specify only the fields of interest. {@link Zone#zoneId()} is always returned, even if + * not specified. + */ + public static ZoneOption fields(ZoneField... fields) { + return new ZoneOption(ComputeRpc.Option.FIELDS, ZoneField.selector(fields)); + } + } + + /** + * Class for specifying zone list options. + */ + class ZoneListOption extends Option { + + private static final long serialVersionUID = -4721971371200905764L; + + private ZoneListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter to the zones being listed. + */ + public static ZoneListOption filter(ZoneFilter filter) { + return new ZoneListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of zones to be returned. + */ + public static ZoneListOption maxResults(long maxResults) { + return new ZoneListOption(ComputeRpc.Option.MAX_RESULTS, maxResults); + } + + /** + * Returns an option to specify the page token from which to start listing zones. + */ + public static ZoneListOption startPageToken(String pageToken) { + return new ZoneListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the zone's fields to be returned by the RPC call. If this option + * is not provided all zone's fields are returned. {@code ZoneListOption.fields} can be used to + * specify only the fields of interest. {@link Zone#zoneId()} is always returned, even if + * not specified. + */ + public static ZoneListOption fields(ZoneField... fields) { + StringBuilder builder = new StringBuilder(); + builder.append("items(").append(ZoneField.selector(fields)).append("),nextPageToken"); + return new ZoneListOption(ComputeRpc.Option.FIELDS, builder.toString()); + } + } + + /** + * Class for specifying license get options. + */ + class LicenseOption extends Option { + + private static final long serialVersionUID = -2968652076389846258L; + + private LicenseOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the license's fields to be returned by the RPC call. If this + * option is not provided all license's fields are returned. {@code LicenseOption.fields} can be + * used to specify only the fields of interest. {@link License#licenseId()} is always returned, + * even if not specified. + */ + public static LicenseOption fields(LicenseField... fields) { + return new LicenseOption(ComputeRpc.Option.FIELDS, LicenseField.selector(fields)); + } + } + + /** + * Returns the requested disk type or {@code null} if not found. 
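An illustrative sketch of partial get requests built with these options (resource names are placeholders); per the method contracts below, a null return value means the resource was not found:

    Compute compute = ComputeOptions.builder().projectId("my-project").build().service();
    Zone zone = compute.getZone("us-central1-a",
        Compute.ZoneOption.fields(Compute.ZoneField.STATUS, Compute.ZoneField.REGION));
    License license = compute.getLicense("some-license");
    if (zone == null || license == null) {
      // the requested resource does not exist
    }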
+ * + * @throws ComputeException upon failure + */ + DiskType getDiskType(DiskTypeId diskTypeId, DiskTypeOption... options); + + /** + * Returns the requested disk type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + DiskType getDiskType(String zone, String diskType, DiskTypeOption... options); + + /** + * Lists the disk types in the provided zone. + * + * @throws ComputeException upon failure + */ + Page listDiskTypes(String zone, DiskTypeListOption... options); + + /** + * Lists all disk types. + * + * @throws ComputeException upon failure + */ + Page listDiskTypes(DiskTypeAggregatedListOption... options); + + /** + * Returns the requested machine type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + MachineType getMachineType(MachineTypeId machineTypeId, MachineTypeOption... options); + + /** + * Returns the requested machine type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + MachineType getMachineType(String zone, String machineType, MachineTypeOption... options); + + /** + * Lists the machine types in the provided zone. + * + * @throws ComputeException upon failure + */ + Page listMachineTypes(String zone, MachineTypeListOption... options); + + /** + * Lists all machine types. + * + * @throws ComputeException upon failure + */ + Page listMachineTypes(MachineTypeAggregatedListOption... options); + + /** + * Returns the requested region or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Region getRegion(String region, RegionOption... options); + + /** + * Lists the regions. + * + * @throws ComputeException upon failure + */ + Page listRegions(RegionListOption... options); + + /** + * Returns the requested zone or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Zone getZone(String zone, ZoneOption... options); + + /** + * Lists the zones. + * + * @throws ComputeException upon failure + */ + Page listZones(ZoneListOption... options); + + /** + * Returns the requested license or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + License getLicense(String license, LicenseOption... options); + + /** + * Returns the requested license or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + License getLicense(LicenseId license, LicenseOption... 
options); } diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java index 8a3a82af8827..2087e570a349 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/ComputeImpl.java @@ -16,15 +16,460 @@ package com.google.gcloud.compute; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.gcloud.RetryHelper.runWithRetries; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.collect.Maps; import com.google.gcloud.BaseService; +import com.google.gcloud.Page; +import com.google.gcloud.PageImpl; +import com.google.gcloud.PageImpl.NextPageFetcher; +import com.google.gcloud.RetryHelper; import com.google.gcloud.spi.ComputeRpc; +import java.util.Map; +import java.util.concurrent.Callable; + final class ComputeImpl extends BaseService implements Compute { + private static class DiskTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -5253916264932522976L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + DiskTypePageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listDiskTypes(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedDiskTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -1664743503750307996L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedDiskTypePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listDiskTypes(serviceOptions, requestOptions); + } + } + + private static class MachineTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -5048133000517001933L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + MachineTypePageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listMachineTypes(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedMachineTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 2919227789802660026L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedMachineTypePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return 
listMachineTypes(serviceOptions, requestOptions); + } + } + + private static class RegionPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 4180147045485258863L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + RegionPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listRegions(serviceOptions, requestOptions); + } + } + + private static class ZonePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -3946202621600687597L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + ZonePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listZones(serviceOptions, requestOptions); + } + } + private final ComputeRpc computeRpc; ComputeImpl(ComputeOptions options) { super(options); computeRpc = options.rpc(); } + + @Override + public DiskType getDiskType(final DiskTypeId diskTypeId, DiskTypeOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.DiskType answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.DiskType call() { + return computeRpc.getDiskType(diskTypeId.zone(), diskTypeId.diskType(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : DiskType.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public DiskType getDiskType(String zone, String diskType, DiskTypeOption... options) { + return getDiskType(DiskTypeId.of(zone, diskType), options); + } + + @Override + public Page listDiskTypes(String zone, DiskTypeListOption... options) { + return listDiskTypes(zone, options(), optionMap(options)); + } + + private static Page listDiskTypes(final String zone, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDiskTypes(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable diskTypes = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public DiskType apply(com.google.api.services.compute.model.DiskType diskType) { + return DiskType.fromPb(diskType); + } + }); + return new PageImpl<>(new DiskTypePageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, diskTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listDiskTypes(DiskTypeAggregatedListOption... 
options) { + return listDiskTypes(options(), optionMap(options)); + } + + private static Page listDiskTypes(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDiskTypes(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable diskTypes = Iterables.transform(result.y(), + new Function() { + @Override + public DiskType apply(com.google.api.services.compute.model.DiskType diskType) { + return DiskType.fromPb(diskType); + } + }); + return new PageImpl<>(new AggregatedDiskTypePageFetcher(serviceOptions, cursor, optionsMap), + cursor, diskTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public MachineType getMachineType(final MachineTypeId machineType, MachineTypeOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.MachineType answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.MachineType call() { + return computeRpc.getMachineType(machineType.zone(), machineType.machineType(), + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : MachineType.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public MachineType getMachineType(String zone, String machineType, MachineTypeOption... options) { + return getMachineType(MachineTypeId.of(zone, machineType), options); + } + + @Override + public Page listMachineTypes(String zone, MachineTypeListOption... options) { + return listMachineTypes(zone, options(), optionMap(options)); + } + + private static Page listMachineTypes(final String zone, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listMachineTypes(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable machineTypes = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public MachineType apply( + com.google.api.services.compute.model.MachineType machineType) { + return MachineType.fromPb(machineType); + } + }); + return new PageImpl<>(new MachineTypePageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, machineTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listMachineTypes(MachineTypeAggregatedListOption... 
options) { + return listMachineTypes(options(), optionMap(options)); + } + + private static Page listMachineTypes(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listMachineTypes(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable machineTypes = Iterables.transform(result.y(), + new Function() { + @Override + public MachineType apply( + com.google.api.services.compute.model.MachineType machineType) { + return MachineType.fromPb(machineType); + } + }); + return new PageImpl<>( + new AggregatedMachineTypePageFetcher(serviceOptions, cursor, optionsMap), cursor, + machineTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Region getRegion(final String region, RegionOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Region answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Region call() { + return computeRpc.getRegion(region, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Region.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listRegions(RegionListOption... options) { + return listRegions(options(), optionMap(options)); + } + + private static Page listRegions(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listRegions(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable regions = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public Region apply(com.google.api.services.compute.model.Region region) { + return Region.fromPb(region); + } + }); + return new PageImpl<>(new RegionPageFetcher(serviceOptions, cursor, optionsMap), cursor, + regions); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Zone getZone(final String zone, ZoneOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Zone answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Zone call() { + return computeRpc.getZone(zone, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Zone.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listZones(ZoneListOption... options) { + return listZones(options(), optionMap(options)); + } + + private static Page listZones(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listZones(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable zones = Iterables.transform( + result.y() == null ? 
ImmutableList.of() + : result.y(), + new Function() { + @Override + public Zone apply(com.google.api.services.compute.model.Zone zone) { + return Zone.fromPb(zone); + } + }); + return new PageImpl<>(new ZonePageFetcher(serviceOptions, cursor, optionsMap), cursor, zones); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public License getLicense(String license, LicenseOption... options) { + return getLicense(LicenseId.of(license), options); + } + + @Override + public License getLicense(LicenseId license, LicenseOption... options) { + final LicenseId completeId = license.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.License answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.License call() { + return computeRpc.getLicense(completeId.project(), completeId.license(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : License.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private Map optionMap(Option... options) { + Map optionMap = Maps.newEnumMap(ComputeRpc.Option.class); + for (Option option : options) { + Object prev = optionMap.put(option.rpcOption(), option.value()); + checkArgument(prev == null, "Duplicate option %s", option); + } + return optionMap; + } } diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Option.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Option.java new file mode 100644 index 000000000000..ae100c9c9765 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/Option.java @@ -0,0 +1,72 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; +import com.google.gcloud.spi.ComputeRpc; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Base class for Compute operation option. 
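The PageImpl/NextPageFetcher pairs above back manual page iteration; an illustrative sketch, assuming a configured Compute instance:

    Compute compute = ComputeOptions.builder().projectId("my-project").build().service();
    Page<Zone> page = compute.listZones(Compute.ZoneListOption.maxResults(10));
    while (page != null) {
      for (Zone zone : page.values()) {
        // process the zone
      }
      // ZonePageFetcher re-issues the request with the stored page token;
      // nextPage() is expected to return null once no token is left
      page = page.nextPage();
    }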
+ */ +class Option implements Serializable { + + private static final long serialVersionUID = 4116849309806774350L; + + private final ComputeRpc.Option rpcOption; + private final Object value; + + Option(ComputeRpc.Option rpcOption, Object value) { + this.rpcOption = checkNotNull(rpcOption); + this.value = value; + } + + ComputeRpc.Option rpcOption() { + return rpcOption; + } + + Object value() { + return value; + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Option)) { + return false; + } + Option other = (Option) obj; + return Objects.equals(rpcOption, other.rpcOption) + && Objects.equals(value, other.value); + } + + @Override + public int hashCode() { + return Objects.hash(rpcOption, value); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", rpcOption.value()) + .add("value", value) + .toString(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/testing/RemoteComputeHelper.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/testing/RemoteComputeHelper.java new file mode 100644 index 000000000000..266b31cd7c93 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/testing/RemoteComputeHelper.java @@ -0,0 +1,117 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud.compute.testing; + +import com.google.gcloud.AuthCredentials; +import com.google.gcloud.RetryParams; +import com.google.gcloud.compute.ComputeOptions; + +import java.io.IOException; +import java.io.InputStream; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Utility to create a remote Compute configuration for testing. Compute options can be obtained + * via the {@link #options()} method. Returned options have custom + * {@link ComputeOptions#retryParams()}: {@link RetryParams#retryMaxAttempts()} is {@code 10}, + * {@link RetryParams#retryMinAttempts()} is {@code 6}, {@link RetryParams#maxRetryDelayMillis()} is + * {@code 30000}, {@link RetryParams#totalRetryPeriodMillis()} is {@code 120000} and + * {@link RetryParams#initialRetryDelayMillis()} is {@code 250}. + * {@link ComputeOptions#connectTimeout()} and {@link ComputeOptions#readTimeout()} are both set to + * {@code 60000}. + */ +public class RemoteComputeHelper { + + private static final Logger log = Logger.getLogger(RemoteComputeHelper.class.getName()); + private final ComputeOptions options; + + private RemoteComputeHelper(ComputeOptions options) { + this.options = options; + } + + /** + * Returns a {@link ComputeOptions} object to be used for testing. + */ + public ComputeOptions options() { + return options; + } + + /** + * Creates a {@code RemoteComputeHelper} object for the given project id and JSON key input + * stream. 
+ * + * @param projectId id of the project to be used for running the tests + * @param keyStream input stream for a JSON key + * @return A {@code RemoteComputeHelper} object for the provided options + * @throws ComputeHelperException if {@code keyStream} is not a valid JSON key stream + */ + public static RemoteComputeHelper create(String projectId, InputStream keyStream) + throws ComputeHelperException { + try { + ComputeOptions computeOptions = ComputeOptions.builder() + .authCredentials(AuthCredentials.createForJson(keyStream)) + .projectId(projectId) + .retryParams(retryParams()) + .connectTimeout(60000) + .readTimeout(60000) + .build(); + return new RemoteComputeHelper(computeOptions); + } catch (IOException ex) { + if (log.isLoggable(Level.WARNING)) { + log.log(Level.WARNING, ex.getMessage()); + } + throw ComputeHelperException.translate(ex); + } + } + + /** + * Creates a {@code RemoteComputeHelper} object using default project id and authentication + * credentials. + */ + public static RemoteComputeHelper create() { + ComputeOptions computeOptions = ComputeOptions.builder() + .retryParams(retryParams()) + .connectTimeout(60000) + .readTimeout(60000) + .build(); + return new RemoteComputeHelper(computeOptions); + } + + private static RetryParams retryParams() { + return RetryParams.builder() + .retryMaxAttempts(10) + .retryMinAttempts(6) + .maxRetryDelayMillis(30000) + .totalRetryPeriodMillis(120000) + .initialRetryDelayMillis(250) + .build(); + } + + public static class ComputeHelperException extends RuntimeException { + + private static final long serialVersionUID = -5747977015007639912L; + + public ComputeHelperException(String message, Throwable cause) { + super(message, cause); + } + + public static ComputeHelperException translate(Exception ex) { + return new ComputeHelperException(ex.getMessage(), ex); + } + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/compute/testing/package-info.java b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/testing/package-info.java new file mode 100644 index 000000000000..86f1c2428cde --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/compute/testing/package-info.java @@ -0,0 +1,31 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A testing helper for Google Compute Engine. + * + *

+ * <p>A simple usage example:
+ *
+ * <p>Before the test:
+ *
+ * <pre> {@code
+ * RemoteComputeHelper computeHelper = RemoteComputeHelper.create();
+ * Compute compute = computeHelper.options().service();
+ * } </pre>
+ * + * @see + * gcloud-java tools for testing + */ +package com.google.gcloud.compute.testing; diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java index b7a59a9413c4..35524e0c116d 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/ComputeRpc.java @@ -16,5 +16,149 @@ package com.google.gcloud.spi; +import com.google.api.services.compute.model.DiskType; +import com.google.api.services.compute.model.License; +import com.google.api.services.compute.model.MachineType; +import com.google.api.services.compute.model.Region; +import com.google.api.services.compute.model.Zone; +import com.google.gcloud.compute.ComputeException; + +import java.util.Map; + public interface ComputeRpc { + + // These options are part of the Google Compute Engine query parameters + enum Option { + FIELDS("fields"), + MAX_RESULTS("maxResults"), + PAGE_TOKEN("pageToken"), + FILTER("filter"); + + private final String value; + + Option(String value) { + this.value = value; + } + + public String value() { + return value; + } + + @SuppressWarnings("unchecked") + T get(Map options) { + return (T) options.get(this); + } + + String getString(Map options) { + return get(options); + } + + Long getLong(Map options) { + return get(options); + } + + Boolean getBoolean(Map options) { + return get(options); + } + } + + class Tuple { + + private final X x; + private final Y y; + + private Tuple(X x, Y y) { + this.x = x; + this.y = y; + } + + public static Tuple of(X x, Y y) { + return new Tuple<>(x, y); + } + + public X x() { + return x; + } + + public Y y() { + return y; + } + } + + /** + * Returns the requested disk type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + DiskType getDiskType(String zone, String diskType, Map options); + + /** + * Lists the disk types in the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listDiskTypes(String zone, Map options); + + /** + * Lists all disk types. + * + * @throws ComputeException upon failure + */ + Tuple> listDiskTypes(Map options); + + /** + * Returns the requested machine type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + MachineType getMachineType(String zone, String diskType, Map options); + + /** + * Lists the machine types in the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listMachineTypes(String zone, Map options); + + /** + * Lists all machine types. + * + * @throws ComputeException upon failure + */ + Tuple> listMachineTypes(Map options); + + /** + * Returns the requested region or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Region getRegion(String region, Map options); + + /** + * Lists the regions. + * + * @throws ComputeException upon failure + */ + Tuple> listRegions(Map options); + + /** + * Returns the requested zone or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Zone getZone(String zone, Map options); + + /** + * Lists the zones. + * + * @throws ComputeException upon failure + */ + Tuple> listZones(Map options); + + /** + * Returns the requested license or {@code null} if not found. 
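An illustrative sketch of the paging contract this SPI exposes, with type parameters following their use in ComputeImpl above; DefaultComputeRpc (added below) is instantiated directly only for brevity, and the project id is a placeholder:

    ComputeOptions options = ComputeOptions.builder().projectId("my-project").build();
    ComputeRpc rpc = new DefaultComputeRpc(options);
    Map<ComputeRpc.Option, Object> rpcOptions =
        ImmutableMap.<ComputeRpc.Option, Object>of(ComputeRpc.Option.MAX_RESULTS, 100L);
    ComputeRpc.Tuple<String, Iterable<Zone>> result = rpc.listZones(rpcOptions);
    String nextPageToken = result.x();  // null on the last page
    Iterable<Zone> zones = result.y();  // com.google.api.services.compute.model.Zone objects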
+ * + * @throws ComputeException upon failure + */ + License getLicense(String project, String license, Map options); } diff --git a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java index 6667588dd0ef..fe714acab502 100644 --- a/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java +++ b/gcloud-java-compute/src/main/java/com/google/gcloud/spi/DefaultComputeRpc.java @@ -16,14 +16,35 @@ package com.google.gcloud.spi; +import static com.google.gcloud.spi.ComputeRpc.Option.FIELDS; +import static com.google.gcloud.spi.ComputeRpc.Option.FILTER; +import static com.google.gcloud.spi.ComputeRpc.Option.MAX_RESULTS; +import static com.google.gcloud.spi.ComputeRpc.Option.PAGE_TOKEN; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; + import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.json.jackson.JacksonFactory; import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.DiskType; +import com.google.api.services.compute.model.DiskTypeAggregatedList; +import com.google.api.services.compute.model.DiskTypeList; +import com.google.api.services.compute.model.DiskTypesScopedList; +import com.google.api.services.compute.model.License; +import com.google.api.services.compute.model.MachineType; +import com.google.api.services.compute.model.MachineTypeAggregatedList; +import com.google.api.services.compute.model.MachineTypeList; +import com.google.api.services.compute.model.MachineTypesScopedList; +import com.google.api.services.compute.model.Region; +import com.google.api.services.compute.model.RegionList; +import com.google.api.services.compute.model.Zone; +import com.google.api.services.compute.model.ZoneList; +import com.google.common.collect.ImmutableList; import com.google.gcloud.compute.ComputeException; import com.google.gcloud.compute.ComputeOptions; import java.io.IOException; +import java.util.Map; public class DefaultComputeRpc implements ComputeRpc { @@ -43,4 +64,201 @@ public DefaultComputeRpc(ComputeOptions options) { private static ComputeException translate(IOException exception) { return new ComputeException(exception); } + + @Override + public DiskType getDiskType(String zone, String diskType, Map options) { + try { + return compute.diskTypes() + .get(this.options.projectId(), zone, diskType) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listDiskTypes(String zone, Map options) { + try { + DiskTypeList diskTypesList = compute.diskTypes() + .list(this.options.projectId(), zone) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + .setFields(FIELDS.getString(options)) + .execute(); + Iterable diskTypes = diskTypesList.getItems(); + return Tuple.of(diskTypesList.getNextPageToken(), diskTypes); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listDiskTypes(Map options) { + try { + DiskTypeAggregatedList aggregatedList = compute.diskTypes() + .aggregatedList(this.options.projectId()) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) 
+ .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (DiskTypesScopedList diskTypesScopedList : scopedList.values()) { + if (diskTypesScopedList.getDiskTypes() != null) { + builder.addAll(diskTypesScopedList.getDiskTypes()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public MachineType getMachineType(String zone, String machineType, Map options) { + try { + return compute.machineTypes() + .get(this.options.projectId(), zone, machineType) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listMachineTypes(String zone, + Map options) { + try { + MachineTypeList machineTypesList = compute.machineTypes() + .list(this.options.projectId(), zone) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + .setFields(FIELDS.getString(options)) + .execute(); + Iterable machineTypes = machineTypesList.getItems(); + return Tuple.of(machineTypesList.getNextPageToken(), machineTypes); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listMachineTypes(Map options) { + try { + MachineTypeAggregatedList aggregatedList = compute.machineTypes() + .aggregatedList(this.options.projectId()) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (MachineTypesScopedList machineTypesScopedList : scopedList.values()) { + if (machineTypesScopedList.getMachineTypes() != null) { + builder.addAll(machineTypesScopedList.getMachineTypes()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Region getRegion(String region, Map options) { + try { + return compute.regions() + .get(this.options.projectId(), region) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listRegions(Map options) { + try { + RegionList regionsList = compute.regions() + .list(this.options.projectId()) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + .setFields(FIELDS.getString(options)) + .execute(); + Iterable regions = regionsList.getItems(); + return Tuple.of(regionsList.getNextPageToken(), regions); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Zone getZone(String zone, Map options) { + try { + return compute.zones() + .get(this.options.projectId(), zone) + .setFields(FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listZones(Map options) { + try { + ZoneList zonesList = compute.zones() + .list(this.options.projectId()) + .setFilter(FILTER.getString(options)) + .setMaxResults(MAX_RESULTS.getLong(options)) + .setPageToken(PAGE_TOKEN.getString(options)) + 
.setFields(FIELDS.getString(options))
+          .execute();
+      Iterable<Zone> zones = zonesList.getItems();
+      return Tuple.of(zonesList.getNextPageToken(), zones);
+    } catch (IOException ex) {
+      throw translate(ex);
+    }
+  }
+
+  @Override
+  public License getLicense(String project, String license, Map<Option, ?> options) {
+    try {
+      return compute.licenses()
+          .get(project, license)
+          .setFields(FIELDS.getString(options))
+          .execute();
+    } catch (IOException ex) {
+      return nullForNotFound(ex);
+    }
+  }
+
+  /**
+   * Returns {@code null} if the error code of {@code exception} is 404; otherwise the exception
+   * is re-thrown as a {@code ComputeException}.
+   *
+   * @throws ComputeException if the error code of {@code exception} was not 404.
+   */
+  private static <T> T nullForNotFound(IOException exception) {
+    ComputeException serviceException = translate(exception);
+    if (serviceException.code() == HTTP_NOT_FOUND) {
+      return (T) null;
+    }
+    throw serviceException;
+  }
 }
diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java
new file mode 100644
index 000000000000..5ef9b04ed446
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/ComputeImplTest.java
@@ -0,0 +1,714 @@
+/*
+ * Copyright 2015 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.gcloud.compute; + +import static org.easymock.EasyMock.capture; +import static org.easymock.EasyMock.eq; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; +import com.google.gcloud.Page; +import com.google.gcloud.RetryParams; +import com.google.gcloud.compute.Zone.MaintenanceWindow; +import com.google.gcloud.spi.ComputeRpc; +import com.google.gcloud.spi.ComputeRpc.Tuple; +import com.google.gcloud.spi.ComputeRpcFactory; + +import org.easymock.Capture; +import org.easymock.EasyMock; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.List; +import java.util.Map; + +public class ComputeImplTest { + + private static final String PROJECT = "project"; + private static final String ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final String VALID_DISK_SIZE = "10GB-10TB"; + private static final Long DEFAULT_DISK_SIZE_GB = 10L; + private static final DiskTypeId DISK_TYPE_ID = DiskTypeId.of("project", "zone", "diskType"); + private static final DiskType DISK_TYPE = DiskType.builder() + .id(ID) + .diskTypeId(DISK_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .validDiskSize(VALID_DISK_SIZE) + .defaultDiskSizeGb(DEFAULT_DISK_SIZE_GB) + .build(); + private static final MachineTypeId MACHINE_TYPE_ID = MachineTypeId.of("project", "zone", "type"); + private static final Integer GUEST_CPUS = 1; + private static final Integer MEMORY_MB = 2; + private static final List SCRATCH_DISKS = ImmutableList.of(3); + private static final Integer MAXIMUM_PERSISTENT_DISKS = 4; + private static final Long MAXIMUM_PERSISTENT_DISKS_SIZE_GB = 5L; + private static final MachineType MACHINE_TYPE = MachineType.builder() + .id(ID) + .machineTypeId(MACHINE_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .cpus(GUEST_CPUS) + .memoryMb(MEMORY_MB) + .scratchDisksSizeGb(SCRATCH_DISKS) + .maximumPersistentDisks(MAXIMUM_PERSISTENT_DISKS) + .maximumPersistentDisksSizeGb(MAXIMUM_PERSISTENT_DISKS_SIZE_GB) + .build(); + private static final RegionId REGION_ID = RegionId.of("project", "region"); + private static final Region.Status REGION_STATUS = Region.Status.DOWN; + private static final ZoneId ZONE_ID1 = ZoneId.of("project", "zone1"); + private static final ZoneId ZONE_ID2 = ZoneId.of("project", "zone2"); + private static final List ZONES = ImmutableList.of(ZONE_ID1, ZONE_ID2); + private static final Region.Quota QUOTA1 = + new Region.Quota("METRIC1", 2, 1); + private static final Region.Quota QUOTA2 = + new Region.Quota("METRIC2", 4, 3); + private static final List QUOTAS = ImmutableList.of(QUOTA1, QUOTA2); + private static final Region REGION = Region.builder() + .regionId(REGION_ID) + .id(ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(REGION_STATUS) + .zones(ZONES) + .quotas(QUOTAS) + .build(); + private static final ZoneId ZONE_ID = ZoneId.of("project", "zone"); + private static final Zone.Status ZONE_STATUS = Zone.Status.DOWN; + private static final MaintenanceWindow WINDOW1 = 
new MaintenanceWindow("NAME1", "DESCRIPTION1", + 1453293420000L, 1453293480000L); + private static final MaintenanceWindow WINDOW2 = new MaintenanceWindow("NAME2", "DESCRIPTION2", + 1453293420000L, 1453293480000L); + private static final List WINDOWS = ImmutableList.of(WINDOW1, WINDOW2); + private static final Zone ZONE = Zone.builder() + .zoneId(ZONE_ID) + .id(ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(ZONE_STATUS) + .maintenanceWindows(WINDOWS) + .region(REGION_ID) + .build(); + private static final LicenseId LICENSE_ID = LicenseId.of("project", "license"); + private static final Boolean CHARGES_USE_FEE = true; + private static final License LICENSE = new License(LICENSE_ID, CHARGES_USE_FEE); + + // Empty ComputeRpc options + private static final Map EMPTY_RPC_OPTIONS = ImmutableMap.of(); + + // DiskType options + private static final Compute.DiskTypeOption DISK_TYPE_OPTION_FIELDS = + Compute.DiskTypeOption.fields(Compute.DiskTypeField.ID, Compute.DiskTypeField.DESCRIPTION); + + // DiskType list options + private static final Compute.DiskTypeFilter DISK_TYPE_FILTER = + Compute.DiskTypeFilter.equals(Compute.DiskTypeField.DESCRIPTION, "someDescription"); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_PAGE_TOKEN = + Compute.DiskTypeListOption.startPageToken("cursor"); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_MAX_RESULTS = + Compute.DiskTypeListOption.maxResults(42L); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_FILTER = + Compute.DiskTypeListOption.filter(DISK_TYPE_FILTER); + private static final Map DISK_TYPE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "description eq someDescription"); + + // DiskType aggregated list options + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_PAGE_TOKEN = + Compute.DiskTypeAggregatedListOption.startPageToken("cursor"); + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_MAX_RESULTS = + Compute.DiskTypeAggregatedListOption.maxResults(42L); + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_FILTER = + Compute.DiskTypeAggregatedListOption.filter(DISK_TYPE_FILTER); + + // MachineType options + private static final Compute.MachineTypeOption MACHINE_TYPE_OPTION_FIELDS = + Compute.MachineTypeOption.fields(Compute.MachineTypeField.ID, + Compute.MachineTypeField.DESCRIPTION); + + // MachineType list options + private static final Compute.MachineTypeFilter MACHINE_TYPE_FILTER = + Compute.MachineTypeFilter.notEquals(Compute.MachineTypeField.MAXIMUM_PERSISTENT_DISKS, 42L); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_PAGE_TOKEN = + Compute.MachineTypeListOption.startPageToken("cursor"); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_MAX_RESULTS = + Compute.MachineTypeListOption.maxResults(42L); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_FILTER = + Compute.MachineTypeListOption.filter(MACHINE_TYPE_FILTER); + private static final Map MACHINE_TYPE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "maximumPersistentDisks ne 42"); + + // MachineType aggregated list options + private static final Compute.MachineTypeAggregatedListOption + MACHINE_TYPE_AGGREGATED_LIST_PAGE_TOKEN = + 
Compute.MachineTypeAggregatedListOption.startPageToken("cursor"); + private static final Compute.MachineTypeAggregatedListOption + MACHINE_TYPE_AGGREGATED_LIST_MAX_RESULTS = + Compute.MachineTypeAggregatedListOption.maxResults(42L); + private static final Compute.MachineTypeAggregatedListOption MACHINE_TYPE_AGGREGATED_LIST_FILTER = + Compute.MachineTypeAggregatedListOption.filter(MACHINE_TYPE_FILTER); + + // Region options + private static final Compute.RegionOption REGION_OPTION_FIELDS = + Compute.RegionOption.fields(Compute.RegionField.ID, Compute.RegionField.DESCRIPTION); + + // Region list options + private static final Compute.RegionFilter REGION_FILTER = + Compute.RegionFilter.equals(Compute.RegionField.ID, "someId"); + private static final Compute.RegionListOption REGION_LIST_PAGE_TOKEN = + Compute.RegionListOption.startPageToken("cursor"); + private static final Compute.RegionListOption REGION_LIST_MAX_RESULTS = + Compute.RegionListOption.maxResults(42L); + private static final Compute.RegionListOption REGION_LIST_FILTER = + Compute.RegionListOption.filter(REGION_FILTER); + private static final Map REGION_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "id eq someId"); + + // Zone options + private static final Compute.ZoneOption ZONE_OPTION_FIELDS = + Compute.ZoneOption.fields(Compute.ZoneField.ID, Compute.ZoneField.DESCRIPTION); + + // Zone list options + private static final Compute.ZoneFilter ZONE_FILTER = + Compute.ZoneFilter.notEquals(Compute.ZoneField.NAME, "someName"); + private static final Compute.ZoneListOption ZONE_LIST_PAGE_TOKEN = + Compute.ZoneListOption.startPageToken("cursor"); + private static final Compute.ZoneListOption ZONE_LIST_MAX_RESULTS = + Compute.ZoneListOption.maxResults(42L); + private static final Compute.ZoneListOption ZONE_LIST_FILTER = + Compute.ZoneListOption.filter(ZONE_FILTER); + private static final Map ZONE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "name ne someName"); + + // License options + private static final Compute.LicenseOption LICENSE_OPTION_FIELDS = + Compute.LicenseOption.fields(Compute.LicenseField.CHARGES_USE_FEE); + + private ComputeOptions options; + private ComputeRpcFactory rpcFactoryMock; + private ComputeRpc computeRpcMock; + private Compute compute; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Before + public void setUp() { + rpcFactoryMock = EasyMock.createMock(ComputeRpcFactory.class); + computeRpcMock = EasyMock.createMock(ComputeRpc.class); + EasyMock.expect(rpcFactoryMock.create(EasyMock.anyObject(ComputeOptions.class))) + .andReturn(computeRpcMock).times(1); + EasyMock.replay(rpcFactoryMock); + options = ComputeOptions.builder() + .projectId(PROJECT) + .serviceRpcFactory(rpcFactoryMock) + .retryParams(RetryParams.noRetries()) + .build(); + } + + @After + public void tearDown() { + EasyMock.verify(rpcFactoryMock, computeRpcMock); + } + + @Test + public void testGetOptions() { + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertSame(options, compute.options()); + } + + @Test + public void testGetDiskType() { + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.diskType(), EMPTY_RPC_OPTIONS)) + .andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskType diskType = compute.getDiskType(DISK_TYPE_ID.zone(), 
DISK_TYPE_ID.diskType()); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testGetDiskTypeFromDiskTypeId() { + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.diskType(), EMPTY_RPC_OPTIONS)) + .andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskType diskType = compute.getDiskType(DISK_TYPE_ID); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testGetDiskTypeWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.getDiskType( + eq(DISK_TYPE_ID.zone()), eq(DISK_TYPE_ID.diskType()), capture(capturedOptions))) + .andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskType diskType = + compute.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.diskType(), DISK_TYPE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(DISK_TYPE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testListDiskTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testListEmptyDiskTypes() { + ImmutableList diskTypes = ImmutableList.of(); + Tuple> result = + Tuple.>of(null, diskTypes); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(diskTypes.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testListDiskTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), DISK_TYPE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone(), DISK_TYPE_LIST_MAX_RESULTS, + DISK_TYPE_LIST_PAGE_TOKEN, DISK_TYPE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListDiskTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(); + assertEquals(cursor, page.nextPageCursor()); + 
assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListEmptyDiskTypes() { + ImmutableList diskTypes = ImmutableList.of(); + Tuple> result = + Tuple.>of(null, diskTypes); + EasyMock.expect(computeRpcMock.listDiskTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listDiskTypes(); + assertNull(page.nextPageCursor()); + assertArrayEquals(diskTypes.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListDiskTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_AGGREGATED_LIST_MAX_RESULTS, + DISK_TYPE_AGGREGATED_LIST_PAGE_TOKEN, DISK_TYPE_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testGetMachineType() { + EasyMock.expect( + computeRpcMock.getMachineType( + MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.machineType(), EMPTY_RPC_OPTIONS)) + .andReturn(MACHINE_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + MachineType machineType = + compute.getMachineType(MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.machineType()); + assertEquals(MACHINE_TYPE, machineType); + } + + @Test + public void testGetMachineTypeFromMachineTypeId() { + EasyMock.expect(computeRpcMock.getMachineType( + MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.machineType(), EMPTY_RPC_OPTIONS)) + .andReturn(MACHINE_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + MachineType machineType = compute.getMachineType(MACHINE_TYPE_ID); + assertEquals(MACHINE_TYPE, machineType); + } + + @Test + public void testGetMachineTypeWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.getMachineType(eq(MACHINE_TYPE_ID.zone()), eq(MACHINE_TYPE_ID.machineType()), + capture(capturedOptions))) + .andReturn(MACHINE_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + MachineType machineType = compute.getMachineType(MACHINE_TYPE_ID.zone(), + MACHINE_TYPE_ID.machineType(), MACHINE_TYPE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(DISK_TYPE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(MACHINE_TYPE, machineType); + } + + @Test + public void testListMachineTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), 
Iterables.toArray(page.values(), + MachineType.class)); + } + + @Test + public void testListEmptyMachineTypes() { + ImmutableList machineTypes = + ImmutableList.of(); + Tuple> result = + Tuple.>of(null, + machineTypes); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(machineTypes.toArray(), Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testListMachineTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), MACHINE_TYPE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone(), + MACHINE_TYPE_LIST_MAX_RESULTS, MACHINE_TYPE_LIST_PAGE_TOKEN, MACHINE_TYPE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testAggregatedListMachineTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listMachineTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), Iterables.toArray(page.values(), + MachineType.class)); + } + + @Test + public void testAggregatedListEmptyMachineTypes() { + ImmutableList machineTypes = + ImmutableList.of(); + Tuple> result = + Tuple.>of(null, + machineTypes); + EasyMock.expect(computeRpcMock.listMachineTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listMachineTypes(); + assertNull(page.nextPageCursor()); + assertArrayEquals(machineTypes.toArray(), Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testAggregatedListMachineTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_AGGREGATED_LIST_MAX_RESULTS, + MACHINE_TYPE_AGGREGATED_LIST_PAGE_TOKEN, MACHINE_TYPE_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testGetRegion() { + EasyMock.expect(computeRpcMock.getRegion(REGION_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(REGION.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Region region = compute.getRegion(REGION_ID.region()); + 
assertEquals(REGION, region); + } + + @Test + public void testGetRegionWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getRegion(eq(REGION_ID.region()), capture(capturedOptions))) + .andReturn(REGION.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Region region = compute.getRegion(REGION_ID.region(), REGION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(REGION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(REGION, region); + } + + @Test + public void testListRegions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList regionList = ImmutableList.of(REGION, REGION); + Tuple> result = + Tuple.of(cursor, Iterables.transform(regionList, Region.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listRegions(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegions(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(regionList.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void testListEmptyRegions() { + ImmutableList regions = ImmutableList.of(); + Tuple> result = + Tuple.>of(null, + regions); + EasyMock.expect(computeRpcMock.listRegions(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listRegions(); + assertNull(page.nextPageCursor()); + assertArrayEquals(regions.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void testListRegionsWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList regionList = ImmutableList.of(REGION, REGION); + Tuple> result = + Tuple.of(cursor, Iterables.transform(regionList, Region.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listRegions(REGION_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegions(REGION_LIST_MAX_RESULTS, REGION_LIST_PAGE_TOKEN, + REGION_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(regionList.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void testGetZone() { + EasyMock.expect(computeRpcMock.getZone(ZONE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(ZONE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Zone zone = compute.getZone(ZONE_ID.zone()); + assertEquals(ZONE, zone); + } + + @Test + public void testGetZoneWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getZone(eq(ZONE_ID.zone()), capture(capturedOptions))) + .andReturn(ZONE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Zone zone = compute.getZone(ZONE_ID.zone(), ZONE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(ZONE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(ZONE, zone); + } + + @Test + public void testListZones() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList zoneList = ImmutableList.of(ZONE, ZONE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(zoneList, 
Zone.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listZones(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listZones(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(zoneList.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testListEmptyZones() { + ImmutableList zones = ImmutableList.of(); + Tuple> result = + Tuple.>of(null, zones); + EasyMock.expect(computeRpcMock.listZones(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listZones(); + assertNull(page.nextPageCursor()); + assertArrayEquals(zones.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testListZonesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList zoneList = ImmutableList.of(ZONE, ZONE); + Tuple> result = + Tuple.of(cursor, Iterables.transform(zoneList, Zone.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listZones(ZONE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = + compute.listZones(ZONE_LIST_MAX_RESULTS, ZONE_LIST_PAGE_TOKEN, ZONE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(zoneList.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testGetLicenseFromString() { + EasyMock.expect(computeRpcMock.getLicense(PROJECT, LICENSE_ID.license(), EMPTY_RPC_OPTIONS)) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(LICENSE_ID.license()); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromStringWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.getLicense(eq(PROJECT), eq(LICENSE_ID.license()), capture(capturedOptions))) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(LICENSE_ID.license(), LICENSE_OPTION_FIELDS); + assertEquals(LICENSE, license); + String selector = (String) capturedOptions.getValue().get(LICENSE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("chargesUseFee")); + assertEquals(22, selector.length()); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromId() { + LicenseId licenseId = LicenseId.of("project2", "license2"); + EasyMock.expect( + computeRpcMock.getLicense(licenseId.project(), licenseId.license(), EMPTY_RPC_OPTIONS)) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(licenseId); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromIdWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + LicenseId licenseId = LicenseId.of("project2", "license2"); + EasyMock.expect(computeRpcMock.getLicense(eq(licenseId.project()), eq(licenseId.license()), + capture(capturedOptions))) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(licenseId, LICENSE_OPTION_FIELDS); + assertEquals(LICENSE, license); + String selector = (String) capturedOptions.getValue().get(LICENSE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("chargesUseFee")); + assertEquals(22, 
selector.length()); + assertEquals(LICENSE, license); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java new file mode 100644 index 000000000000..5bc2589e6244 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/gcloud/compute/it/ITComputeTest.java @@ -0,0 +1,450 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud.compute.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.gcloud.Page; +import com.google.gcloud.compute.Compute; +import com.google.gcloud.compute.DiskType; +import com.google.gcloud.compute.License; +import com.google.gcloud.compute.LicenseId; +import com.google.gcloud.compute.MachineType; +import com.google.gcloud.compute.Region; +import com.google.gcloud.compute.Zone; +import com.google.gcloud.compute.testing.RemoteComputeHelper; + +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; + +import java.util.Iterator; + +public class ITComputeTest { + + private static final String REGION = "us-central1"; + private static final String ZONE = "us-central1-a"; + private static final String DISK_TYPE = "local-ssd"; + private static final String MACHINE_TYPE = "f1-micro"; + private static final LicenseId LICENSE_ID = LicenseId.of("ubuntu-os-cloud", "ubuntu-1404-trusty"); + + private static Compute compute; + + @Rule + public Timeout globalTimeout = Timeout.seconds(300); + + @BeforeClass + public static void beforeClass() throws InterruptedException { + RemoteComputeHelper computeHelper = RemoteComputeHelper.create(); + compute = computeHelper.options().service(); + } + + @Test + public void testGetDiskType() { + DiskType diskType = compute.getDiskType(ZONE, DISK_TYPE); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.id()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertEquals(DISK_TYPE, diskType.diskTypeId().diskType()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotNull(diskType.defaultDiskSizeGb()); + } + + @Test + public void testGetDiskTypeWithSelectedFields() { + DiskType diskType = compute.getDiskType(ZONE, DISK_TYPE, + Compute.DiskTypeOption.fields(Compute.DiskTypeField.CREATION_TIMESTAMP)); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.id()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertEquals(DISK_TYPE, diskType.diskTypeId().diskType()); + assertNotNull(diskType.creationTimestamp()); + assertNull(diskType.description()); + 
assertNull(diskType.validDiskSize()); + assertNull(diskType.defaultDiskSizeGb()); + } + + @Test + public void testListDiskTypes() { + Page diskPage = compute.listDiskTypes(ZONE); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while(diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.id()); + assertNotNull(diskType.diskTypeId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotNull(diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testListDiskTypesWithSelectedFields() { + Page diskPage = compute.listDiskTypes(ZONE, + Compute.DiskTypeListOption.fields(Compute.DiskTypeField.CREATION_TIMESTAMP)); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while(diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + assertNull(diskType.id()); + assertNotNull(diskType.diskTypeId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertNotNull(diskType.creationTimestamp()); + assertNull(diskType.description()); + assertNull(diskType.validDiskSize()); + assertNull(diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testListDiskTypesWithFilter() { + Page diskPage = compute.listDiskTypes(ZONE, Compute.DiskTypeListOption.filter( + Compute.DiskTypeFilter.equals(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB, 375))); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while(diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.id()); + assertNotNull(diskType.diskTypeId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertEquals(375, (long) diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testAggregatedListDiskTypes() { + Page diskPage = compute.listDiskTypes(); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while(diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.id()); + assertNotNull(diskType.diskTypeId()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotNull(diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testAggregatedListDiskTypesWithFilter() { + Page diskPage = compute.listDiskTypes(Compute.DiskTypeAggregatedListOption.filter( + Compute.DiskTypeFilter.notEquals(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB, 375))); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while(diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.id()); + assertNotNull(diskType.diskTypeId()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotEquals(375, (long) diskType.defaultDiskSizeGb()); + } + } + + @Test + 
public void testGetMachineType() { + MachineType machineType = compute.getMachineType(ZONE, MACHINE_TYPE); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertEquals(MACHINE_TYPE, machineType.machineTypeId().machineType()); + assertNotNull(machineType.id()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + + @Test + public void testGetMachineTypeWithSelectedFields() { + MachineType machineType = compute.getMachineType(ZONE, MACHINE_TYPE, + Compute.MachineTypeOption.fields(Compute.MachineTypeField.ID)); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertEquals(MACHINE_TYPE, machineType.machineTypeId().machineType()); + assertNotNull(machineType.id()); + assertNull(machineType.creationTimestamp()); + assertNull(machineType.description()); + assertNull(machineType.cpus()); + assertNull(machineType.memoryMb()); + assertNull(machineType.maximumPersistentDisks()); + assertNull(machineType.maximumPersistentDisksSizeGb()); + } + + @Test + public void testListMachineTypes() { + Page machinePage = compute.listMachineTypes(ZONE); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while(machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertNotNull(machineType.id()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testListMachineTypesWithSelectedFields() { + Page machinePage = compute.listMachineTypes(ZONE, + Compute.MachineTypeListOption.fields(Compute.MachineTypeField.CREATION_TIMESTAMP)); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while(machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertNull(machineType.id()); + assertNotNull(machineType.creationTimestamp()); + assertNull(machineType.description()); + assertNull(machineType.cpus()); + assertNull(machineType.memoryMb()); + assertNull(machineType.maximumPersistentDisks()); + assertNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testListMachineTypesWithFilter() { + Page machinePage = compute.listMachineTypes(ZONE, + Compute.MachineTypeListOption.filter( + Compute.MachineTypeFilter.equals(Compute.MachineTypeField.GUEST_CPUS, 2))); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while(machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertNotNull(machineType.id()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertEquals(2, (long) machineType.cpus()); + assertNotNull(machineType.memoryMb()); 
+ assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testAggregatedListMachineTypes() { + Page machinePage = compute.listMachineTypes(); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while(machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertNotNull(machineType.id()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testAggregatedListMachineTypesWithFilter() { + Page machinePage = + compute.listMachineTypes(Compute.MachineTypeAggregatedListOption.filter( + Compute.MachineTypeFilter.notEquals(Compute.MachineTypeField.GUEST_CPUS, 2))); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while(machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertNotNull(machineType.id()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotEquals(2, (long) machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testGetLicense() { + License license= compute.getLicense(LICENSE_ID); + assertEquals(LICENSE_ID, license.licenseId()); + assertNotNull(license.chargesUseFee()); + } + + @Test + public void testGetLicenseWithSelectedFields() { + License license = compute.getLicense(LICENSE_ID, Compute.LicenseOption.fields()); + assertEquals(LICENSE_ID, license.licenseId()); + assertNull(license.chargesUseFee()); + } + + @Test + public void testGetRegion() { + Region region = compute.getRegion(REGION); + assertEquals(REGION, region.regionId().region()); + assertNotNull(region.description()); + assertNotNull(region.creationTimestamp()); + assertNotNull(region.id()); + assertNotNull(region.quotas()); + assertNotNull(region.status()); + assertNotNull(region.zones()); + } + + @Test + public void testGetRegionWithSelectedFields() { + Region region = compute.getRegion(REGION, Compute.RegionOption.fields(Compute.RegionField.ID)); + assertEquals(REGION, region.regionId().region()); + assertNotNull(region.id()); + assertNull(region.description()); + assertNull(region.creationTimestamp()); + assertNull(region.quotas()); + assertNull(region.status()); + assertNull(region.zones()); + } + + @Test + public void testListRegions() { + Page regionPage = compute.listRegions(); + Iterator regionIterator = regionPage.iterateAll(); + while(regionIterator.hasNext()) { + Region region = regionIterator.next(); + assertNotNull(region.regionId()); + assertNotNull(region.description()); + assertNotNull(region.creationTimestamp()); + assertNotNull(region.id()); + assertNotNull(region.quotas()); + assertNotNull(region.status()); + assertNotNull(region.zones()); + } + } + + @Test + public void testListRegionsWithSelectedFields() { + Page regionPage = + compute.listRegions(Compute.RegionListOption.fields(Compute.RegionField.ID)); + Iterator regionIterator = 
regionPage.iterateAll(); + while(regionIterator.hasNext()) { + Region region = regionIterator.next(); + assertNotNull(region.regionId()); + assertNull(region.description()); + assertNull(region.creationTimestamp()); + assertNotNull(region.id()); + assertNull(region.quotas()); + assertNull(region.status()); + assertNull(region.zones()); + } + } + + @Test + public void testListRegionsWithFilter() { + Page regionPage = compute.listRegions(Compute.RegionListOption.filter( + Compute.RegionFilter.equals(Compute.RegionField.NAME, REGION))); + Iterator regionIterator = regionPage.iterateAll(); + assertEquals(REGION, regionIterator.next().regionId().region()); + assertFalse(regionIterator.hasNext()); + } + + @Test + public void testGetZone() { + Zone zone = compute.getZone(ZONE); + assertEquals(ZONE, zone.zoneId().zone()); + assertNotNull(zone.id()); + assertNotNull(zone.creationTimestamp()); + assertNotNull(zone.description()); + assertNotNull(zone.status()); + assertNotNull(zone.region()); + } + + @Test + public void testGetZoneWithSelectedFields() { + Zone zone = compute.getZone(ZONE, Compute.ZoneOption.fields(Compute.ZoneField.ID)); + assertEquals(ZONE, zone.zoneId().zone()); + assertNotNull(zone.id()); + assertNull(zone.creationTimestamp()); + assertNull(zone.description()); + assertNull(zone.status()); + assertNull(zone.maintenanceWindows()); + assertNull(zone.region()); + } + + @Test + public void testListZones() { + Page zonePage = compute.listZones(); + Iterator zoneIterator = zonePage.iterateAll(); + while(zoneIterator.hasNext()) { + Zone zone = zoneIterator.next(); + assertNotNull(zone.zoneId()); + assertNotNull(zone.id()); + assertNotNull(zone.creationTimestamp()); + assertNotNull(zone.description()); + assertNotNull(zone.status()); + assertNotNull(zone.region()); + } + } + + @Test + public void testListZonesWithSelectedFields() { + Page zonePage = compute.listZones( + Compute.ZoneListOption.fields(Compute.ZoneField.CREATION_TIMESTAMP)); + Iterator zoneIterator = zonePage.iterateAll(); + while(zoneIterator.hasNext()) { + Zone zone = zoneIterator.next(); + assertNotNull(zone.zoneId()); + assertNull(zone.id()); + assertNotNull(zone.creationTimestamp()); + assertNull(zone.description()); + assertNull(zone.status()); + assertNull(zone.region()); + } + } + + @Test + public void testListZonesWithFilter() { + Page zonePage = compute.listZones( + Compute.ZoneListOption.filter(Compute.ZoneFilter.equals(Compute.ZoneField.NAME, ZONE))); + Iterator zoneIterator = zonePage.iterateAll(); + assertEquals(ZONE, zoneIterator.next().zoneId().zone()); + assertFalse(zoneIterator.hasNext()); + } +}
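
A few usage notes to go with the new surface. None of the code below is part of the patch; it is a minimal sketch assembled only from calls that appear in the tests above, with a placeholder class name, project id and resource names, and it assumes credentials can be resolved from the environment (the unit tests inject a mocked ComputeRpcFactory instead).

import com.google.gcloud.Page;
import com.google.gcloud.compute.Compute;
import com.google.gcloud.compute.ComputeOptions;
import com.google.gcloud.compute.DiskType;
import com.google.gcloud.compute.MachineType;

import java.util.Iterator;

public class ComputeUsageSketch {

  public static void main(String... args) {
    // Service handle; "my-project" is a placeholder.
    Compute compute = ComputeOptions.builder()
        .projectId("my-project")
        .build()
        .service();

    // Point lookup with a partial response: only the requested fields (plus selfLink)
    // are populated, mirroring testGetDiskTypeWithSelectedFields.
    DiskType diskType = compute.getDiskType("us-central1-a", "local-ssd",
        Compute.DiskTypeOption.fields(Compute.DiskTypeField.ID,
            Compute.DiskTypeField.DESCRIPTION));
    System.out.println(diskType);

    // Per-zone listing with a server-side filter and an explicit page size.
    Page<DiskType> diskTypes = compute.listDiskTypes("us-central1-a",
        Compute.DiskTypeListOption.filter(
            Compute.DiskTypeFilter.equals(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB, 375)),
        Compute.DiskTypeListOption.maxResults(10L));
    Iterator<DiskType> diskTypeIterator = diskTypes.iterateAll();
    while (diskTypeIterator.hasNext()) {
      System.out.println(diskTypeIterator.next());
    }

    // Aggregated listing spans all zones of the project.
    Page<MachineType> machineTypes = compute.listMachineTypes(
        Compute.MachineTypeAggregatedListOption.filter(
            Compute.MachineTypeFilter.equals(Compute.MachineTypeField.GUEST_CPUS, 2)));
    Iterator<MachineType> machineTypeIterator = machineTypes.iterateAll();
    while (machineTypeIterator.hasNext()) {
      System.out.println(machineTypeIterator.next());
    }
  }
}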
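
The same caveat applies to the region, zone and license surface: the call shapes below mirror ITComputeTest, and the helper method and its argument are illustrative only.

import com.google.gcloud.Page;
import com.google.gcloud.compute.Compute;
import com.google.gcloud.compute.License;
import com.google.gcloud.compute.LicenseId;
import com.google.gcloud.compute.Region;
import com.google.gcloud.compute.Zone;

import java.util.Iterator;

public class LocationAndLicenseSketch {

  // "compute" is assumed to be built as in the previous sketch.
  static void inspect(Compute compute) {
    // Partial responses for a single region and zone, as in the *WithSelectedFields tests.
    Region region = compute.getRegion("us-central1",
        Compute.RegionOption.fields(Compute.RegionField.ID));
    Zone zone = compute.getZone("us-central1-a",
        Compute.ZoneOption.fields(Compute.ZoneField.ID));
    System.out.println(region);
    System.out.println(zone);

    // License lookup addressed by project + license name, as in testGetLicense.
    License license = compute.getLicense(LicenseId.of("ubuntu-os-cloud", "ubuntu-1404-trusty"));
    System.out.println(license);

    // Filtering zones by name is expected to return exactly one match
    // (see testListZonesWithFilter).
    Page<Zone> zones = compute.listZones(
        Compute.ZoneListOption.filter(
            Compute.ZoneFilter.equals(Compute.ZoneField.NAME, "us-central1-a")));
    Iterator<Zone> zoneIterator = zones.iterateAll();
    while (zoneIterator.hasNext()) {
      System.out.println(zoneIterator.next());
    }
  }
}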
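
One behavioural detail that is easy to miss in DefaultComputeRpc: the get calls map an HTTP 404 onto a null return via nullForNotFound, while every other backend failure surfaces as a ComputeException. Assuming the service layer passes that null through unchanged (a code path this diff does not show), a caller would distinguish "missing" from "failed" roughly as follows; the disk type name is hypothetical.

import com.google.gcloud.compute.Compute;
import com.google.gcloud.compute.ComputeException;
import com.google.gcloud.compute.DiskType;

public class NotFoundHandlingSketch {

  static DiskType describeOrNull(Compute compute) {
    try {
      // Null means the backend answered 404 for this (hypothetical) disk type.
      DiskType diskType = compute.getDiskType("us-central1-a", "no-such-disk-type");
      if (diskType == null) {
        System.out.println("disk type does not exist");
      }
      return diskType;
    } catch (ComputeException e) {
      // Any non-404 backend failure is translated into ComputeException.
      System.out.println("lookup failed: " + e.getMessage());
      throw e;
    }
  }
}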
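
For reviewers decoding the literal strings in DISK_TYPE_LIST_OPTIONS, MACHINE_TYPE_LIST_OPTIONS, REGION_LIST_OPTIONS and ZONE_LIST_OPTIONS: each filter option is expected to reach the RPC layer as the textual expression "<field> <eq|ne> <value>". The pairs asserted by the unit tests are:

  Compute.DiskTypeFilter.equals(DESCRIPTION, "someDescription")        ->  "description eq someDescription"
  Compute.MachineTypeFilter.notEquals(MAXIMUM_PERSISTENT_DISKS, 42L)   ->  "maximumPersistentDisks ne 42"
  Compute.RegionFilter.equals(ID, "someId")                            ->  "id eq someId"
  Compute.ZoneFilter.notEquals(NAME, "someName")                       ->  "name ne someName"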
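
The bare numbers 23 and 22 in the *WithSelectedFields tests are the lengths of the comma-joined field selectors: the captured FIELDS value contains selfLink in addition to the requested fields, and because the underlying set has no guaranteed order, the tests check contains(...) plus the total length rather than an exact string. A throwaway check, using the same Guava utilities this module already depends on:

import com.google.common.base.Joiner;
import com.google.common.collect.Sets;

import java.util.Set;

public class SelectorLengthCheck {
  public static void main(String... args) {
    // testGetDiskTypeWithSelectedFields: fields(ID, DESCRIPTION) plus selfLink.
    Set<String> diskTypeSelector = Sets.newHashSet("selfLink", "id", "description");
    System.out.println(Joiner.on(',').join(diskTypeSelector).length());  // 23

    // testGetLicenseFromStringWithOptions: fields(CHARGES_USE_FEE) plus selfLink.
    Set<String> licenseSelector = Sets.newHashSet("selfLink", "chargesUseFee");
    System.out.println(Joiner.on(',').join(licenseSelector).length());   // 22
  }
}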