diff --git a/CHANGELOG.md b/CHANGELOG.md
index d0f76cac..4bb85894 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -33,7 +33,6 @@ FEATURES:
* **New Resource:** `netapp-ontap_svm_peers_resource` ([#51](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/51))
* **New Resource:** `netapp-ontap_protocols_cifs_user_group_member_resource` ([#123](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/123))
* **New Resource:** `netapp-ontap_storage_flexcache_resource` ([#46](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/46))
-* **New Resource:** `netapp-protocols_san_lun-maps_resource` ([#13](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/13))
* **New Resource:** `netapp-ontap_protocols_san_lun-maps_resource` ([#13](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/13))
* **New Resource:** `netapp-ontap_name_services_ldap_resource` ([#25](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/25))
* **New Resource:** `netapp-ontap_protocols_cifs_service_resource` ([#23](https://github.com/NetApp/terraform-provider-netapp-ontap/issues/23))
diff --git a/docs/data-sources/storage_flexcache_data_source.md b/docs/data-sources/storage_flexcache_data_source.md
new file mode 100644
index 00000000..cf318bef
--- /dev/null
+++ b/docs/data-sources/storage_flexcache_data_source.md
@@ -0,0 +1,92 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "netapp-ontap_storage_flexcache_data_source Data Source - terraform-provider-netapp-ontap"
+subcategory: "storage"
+description: |-
+ Flexcache data source
+---
+
+# netapp-ontap_storage_flexcache_data_source (Data Source)
+
+Retrieves the details of a flexcache.
+
+### Related ONTAP commands
+* volume flexcache show
+
+## Example Usage
+```terraform
+data "netapp-ontap_storage_flexcache_data_source" "storage_flexcache" {
+ # required to know which system to interface with
+ cx_profile_name = "cluster1"
+ name = "fc1"
+ svm_name = "automation"
+}
+```
+
+
+
+## Schema
+
+### Required
+
+- `cx_profile_name` (String) Connection profile name
+- `name` (String) The name of the flexcache volume
+- `svm_name` (String) Name of the svm to use
+
+### Read-Only
+
+- `aggregates` (Attributes Set) (see [below for nested schema](#nestedatt--aggregates))
+- `constituents_per_aggregate` (Number) The number of constituents per aggregate
+- `dr_cache` (Boolean) The state of the dr cache
+- `global_file_locking_enabled` (Boolean) The state of the global file locking
+- `guarantee` (Attributes) The guarantee of the volume (see [below for nested schema](#nestedatt--guarantee))
+- `id` (String) The UUID of the flexcache volume
+- `junction_path` (String) Name of the junction path
+- `origins` (Attributes Set) Set of the origin volumes (see [below for nested schema](#nestedatt--origins))
+- `size` (Number) The size of the flexcache volume
+- `size_unit` (String) The unit used to interpret the size parameter
+- `use_tiered_aggregate` (Boolean) The state of the use tiered aggregates
+
+
+### Nested Schema for `aggregates`
+
+Read-Only:
+
+- `id` (String) UUID of the aggregate
+- `name` (String) Name of the aggregate
+
+
+
+### Nested Schema for `guarantee`
+
+Read-Only:
+
+- `type` (String) The type of guarantee
+
+
+
+### Nested Schema for `origins`
+
+Required:
+
+- `svm` (Attributes) Origin volume SVM (see [below for nested schema](#nestedatt--origins--svm))
+- `volume` (Attributes) Origin volume (see [below for nested schema](#nestedatt--origins--volume))
+
+
+### Nested Schema for `origins.svm`
+
+Read-Only:
+
+- `id` (String) ID of the origin volume SVM
+- `name` (String) Name of the origin volume SVM
+
+
+
+### Nested Schema for `origins.volume`
+
+Read-Only:
+
+- `id` (String) ID of the origin volume
+- `name` (String) Name of the origin volume
+
+
diff --git a/docs/data-sources/storage_flexcaches_data_source.md b/docs/data-sources/storage_flexcaches_data_source.md
new file mode 100644
index 00000000..eb10baf4
--- /dev/null
+++ b/docs/data-sources/storage_flexcaches_data_source.md
@@ -0,0 +1,118 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "netapp-ontap_storage_flexcaches_data_source Data Source - terraform-provider-netapp-ontap"
+subcategory: "storage"
+description: |-
+ Flexcaches data source
+---
+
+# netapp-ontap_storage_flexcaches_data_source (Data Source)
+
+Retrieves the details of one or more flexcaches by filter.
+
+### Related ONTAP commands
+* volume flexcache show
+
+## Example Usage
+```terraform
+data "netapp-ontap_storage_flexcaches_data_source" "storage_flexcache" {
+ # required to know which system to interface with
+ cx_profile_name = "cluster1"
+ filter = {
+ name = "fc*"
+ svm_name = "automation"
+ }
+}
+```
+
+
+
+## Schema
+
+### Required
+
+- `cx_profile_name` (String) Connection profile name
+
+### Optional
+
+- `filter` (Attributes) (see [below for nested schema](#nestedatt--filter))
+
+### Read-Only
+
+- `storage_flexcaches` (Attributes List) (see [below for nested schema](#nestedatt--storage_flexcaches))
+
+
+### Nested Schema for `filter`
+
+Optional:
+
+- `name` (String) StorageFlexcache name
+- `svm_name` (String) StorageFlexcache svm name
+
+
+
+### Nested Schema for `storage_flexcaches`
+
+Required:
+
+- `cx_profile_name` (String) Connection profile name
+- `name` (String) The name of the flexcache volume to manage
+- `svm_name` (String) Name of the svm to use
+
+Read-Only:
+
+- `aggregates` (Attributes Set) (see [below for nested schema](#nestedatt--storage_flexcaches--aggregates))
+- `constituents_per_aggregate` (Number) The number of constituents per aggregate
+- `dr_cache` (Boolean) The state of the dr cache
+- `global_file_locking_enabled` (Boolean) The state of the global file locking
+- `guarantee` (Attributes) The guarantee of the volume (see [below for nested schema](#nestedatt--storage_flexcaches--guarantee))
+- `id` (String) The UUID of the flexcache volume
+- `junction_path` (String) Name of the junction path
+- `origins` (Attributes Set) Set of the origin volumes (see [below for nested schema](#nestedatt--storage_flexcaches--origins))
+- `size` (Number) The size of the flexcache volume
+- `size_unit` (String) The unit used to interpret the size parameter
+- `use_tiered_aggregate` (Boolean) The state of the use tiered aggregates
+
+
+### Nested Schema for `storage_flexcaches.aggregates`
+
+Read-Only:
+
+- `id` (String) ID of the aggregate
+- `name` (String) Name of the aggregate
+
+
+
+### Nested Schema for `storage_flexcaches.guarantee`
+
+Read-Only:
+
+- `type` (String) The type of the guarantee
+
+
+
+### Nested Schema for `storage_flexcaches.origins`
+
+Required:
+
+- `svm` (Attributes) Origin volume SVM (see [below for nested schema](#nestedatt--storage_flexcaches--origins--svm))
+- `volume` (Attributes) Origin volume (see [below for nested schema](#nestedatt--storage_flexcaches--origins--volume))
+
+
+### Nested Schema for `storage_flexcaches.origins.svm`
+
+Read-Only:
+
+- `id` (String) ID of the origin volume SVM
+- `name` (String) Name of the origin volume SVM
+
+
+
+### Nested Schema for `storage_flexcaches.origins.volume`
+
+Read-Only:
+
+- `id` (String) ID of the origin volume
+- `name` (String) Name of the origin volume
+
+
diff --git a/docs/resources/storage_flexcache_resource.md b/docs/resources/storage_flexcache_resource.md
new file mode 100644
index 00000000..63e80016
--- /dev/null
+++ b/docs/resources/storage_flexcache_resource.md
@@ -0,0 +1,118 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "netapp-ontap_storage_flexcache_resource Resource - terraform-provider-netapp-ontap"
+subcategory: "storage"
+description: |-
+ Flexcache resource
+---
+
+# netapp-ontap_storage_flexcache_resource (Resource)
+
+Flexcache resource
+
+### Related ONTAP commands
+* volume flexcache create
+* volume flexcache prepopulate start
+
+## Example Usage
+```terraform
+resource "netapp-ontap_storage_flexcache_resource" "storage_flexcache" {
+ # required to know which system to interface with
+ cx_profile_name = "cluster1"
+ name = "fc1"
+ svm_name = "automation"
+ origins = [
+ {
+ volume = {
+ name = "vol1"
+ },
+ svm = {
+ name = "automation"
+ }
+ }
+ ]
+ size = 400
+ size_unit = "mb"
+ guarantee = {
+ type = "none"
+ }
+ dr_cache = false
+ global_file_locking_enabled = false
+ aggregates = [
+ {
+ name = "aggr1"
+ }
+ ]
+}
+```
+
+
+## Schema
+
+### Required
+
+- `cx_profile_name` (String) Connection profile name
+- `name` (String) The name of the flexcache volume to manage
+- `origins` (Attributes Set) Set of the origin volumes (see [below for nested schema](#nestedatt--origins))
+- `svm_name` (String) Name of the svm to use
+
+### Optional
+
+- `aggregates` (Attributes Set) Set of the aggregates to use (see [below for nested schema](#nestedatt--aggregates))
+- `constituents_per_aggregate` (Number) The number of constituents per aggregate
+- `dr_cache` (Boolean) The state of the dr cache
+- `global_file_locking_enabled` (Boolean) The state of the global file locking
+- `guarantee` (Attributes) The guarantee of the volume (see [below for nested schema](#nestedatt--guarantee))
+- `junction_path` (String) Name of the junction path
+- `size` (Number) The size of the flexcache volume
+- `size_unit` (String) The unit used to interpret the size parameter
+- `use_tiered_aggregate` (Boolean) The state of the use tiered aggregates
+
+### Read-Only
+
+- `id` (String) The ID of the volume
+
+
+### Nested Schema for `origins`
+
+Required:
+
+- `svm` (Attributes) origin volume SVM (see [below for nested schema](#nestedatt--origins--svm))
+- `volume` (Attributes) origin volume (see [below for nested schema](#nestedatt--origins--volume))
+
+
+### Nested Schema for `origins.svm`
+
+Optional:
+
+- `id` (String) ID of the origin volume SVM
+- `name` (String) Name of the origin volume SVM
+
+
+
+### Nested Schema for `origins.volume`
+
+Optional:
+
+- `id` (String) ID of the origin volume
+- `name` (String) Name of the origin volume
+
+
+
+
+### Nested Schema for `aggregates`
+
+Optional:
+
+- `id` (String) UUID of the aggregate
+- `name` (String) Name of the aggregate
+
+
+
+### Nested Schema for `guarantee`
+
+Optional:
+
+- `type` (String) The type of guarantee
+
+
diff --git a/examples/data-sources/netapp-ontap_storage_flexcache/data-source.tf b/examples/data-sources/netapp-ontap_storage_flexcache/data-source.tf
new file mode 100644
index 00000000..868f2b62
--- /dev/null
+++ b/examples/data-sources/netapp-ontap_storage_flexcache/data-source.tf
@@ -0,0 +1,6 @@
+data "netapp-ontap_storage_flexcache_data_source" "storage_flexcache" {
+ # required to know which system to interface with
+ cx_profile_name = "cluster5"
+ name = "fc5"
+ svm_name = "automation"
+}
diff --git a/examples/data-sources/netapp-ontap_storage_flexcache/provider.tf b/examples/data-sources/netapp-ontap_storage_flexcache/provider.tf
new file mode 120000
index 00000000..c6b7138f
--- /dev/null
+++ b/examples/data-sources/netapp-ontap_storage_flexcache/provider.tf
@@ -0,0 +1 @@
+../../provider/provider.tf
\ No newline at end of file
diff --git a/examples/data-sources/netapp-ontap_storage_flexcache/variables.tf b/examples/data-sources/netapp-ontap_storage_flexcache/variables.tf
new file mode 100644
index 00000000..b79f07ed
--- /dev/null
+++ b/examples/data-sources/netapp-ontap_storage_flexcache/variables.tf
@@ -0,0 +1,11 @@
+# Terraform will prompt for values, unless a tfvars file is present.
+variable "username" {
+ type = string
+}
+variable "password" {
+ type = string
+ sensitive = true
+}
+variable "validate_certs" {
+ type = bool
+}
diff --git a/examples/data-sources/netapp-ontap_storage_flexcaches/data-source.tf b/examples/data-sources/netapp-ontap_storage_flexcaches/data-source.tf
new file mode 100644
index 00000000..8f95acf6
--- /dev/null
+++ b/examples/data-sources/netapp-ontap_storage_flexcaches/data-source.tf
@@ -0,0 +1,8 @@
+data "netapp-ontap_storage_flexcaches_data_source" "storage_flexcache" {
+ # required to know which system to interface with
+ cx_profile_name = "cluster4"
+ filter = {
+ name = "f*"
+ svm_name = "automation"
+ }
+}
diff --git a/examples/data-sources/netapp-ontap_storage_flexcaches/provider.tf b/examples/data-sources/netapp-ontap_storage_flexcaches/provider.tf
new file mode 120000
index 00000000..c6b7138f
--- /dev/null
+++ b/examples/data-sources/netapp-ontap_storage_flexcaches/provider.tf
@@ -0,0 +1 @@
+../../provider/provider.tf
\ No newline at end of file
diff --git a/examples/data-sources/netapp-ontap_storage_flexcaches/variables.tf b/examples/data-sources/netapp-ontap_storage_flexcaches/variables.tf
new file mode 100644
index 00000000..b79f07ed
--- /dev/null
+++ b/examples/data-sources/netapp-ontap_storage_flexcaches/variables.tf
@@ -0,0 +1,11 @@
+# Terraform will prompt for values, unless a tfvars file is present.
+variable "username" {
+ type = string
+}
+variable "password" {
+ type = string
+ sensitive = true
+}
+variable "validate_certs" {
+ type = bool
+}
diff --git a/examples/resources/netapp-ontap_storage_flexcache/provider.tf b/examples/resources/netapp-ontap_storage_flexcache/provider.tf
new file mode 120000
index 00000000..c6b7138f
--- /dev/null
+++ b/examples/resources/netapp-ontap_storage_flexcache/provider.tf
@@ -0,0 +1 @@
+../../provider/provider.tf
\ No newline at end of file
diff --git a/examples/resources/netapp-ontap_storage_flexcache/resource.tf b/examples/resources/netapp-ontap_storage_flexcache/resource.tf
new file mode 100644
index 00000000..18443351
--- /dev/null
+++ b/examples/resources/netapp-ontap_storage_flexcache/resource.tf
@@ -0,0 +1,28 @@
+resource "netapp-ontap_storage_flexcache_resource" "storage_flexcache" {
+ # required to know which system to interface with
+ cx_profile_name = "cluster4"
+ name = "acc_test_storage_flexcache_volume"
+ svm_name = "acc_test"
+ origins = [
+ {
+ volume = {
+ name = "acc_test_storage_flexcache_origin_volume"
+ },
+ svm = {
+ name = "acc_test"
+ }
+ }
+ ]
+ size = 400
+ size_unit = "mb"
+ guarantee = {
+ type = "none"
+ }
+ dr_cache = false
+ global_file_locking_enabled = false
+ aggregates = [
+ {
+ name = "acc_test"
+ }
+ ]
+}
diff --git a/examples/resources/netapp-ontap_storage_flexcache/variables.tf b/examples/resources/netapp-ontap_storage_flexcache/variables.tf
new file mode 100644
index 00000000..b79f07ed
--- /dev/null
+++ b/examples/resources/netapp-ontap_storage_flexcache/variables.tf
@@ -0,0 +1,11 @@
+# Terraform will prompt for values, unless a tfvars file is present.
+variable "username" {
+ type = string
+}
+variable "password" {
+ type = string
+ sensitive = true
+}
+variable "validate_certs" {
+ type = bool
+}
diff --git a/internal/interfaces/storage_flexcache.go b/internal/interfaces/storage_flexcache.go
new file mode 100644
index 00000000..d7021d8d
--- /dev/null
+++ b/internal/interfaces/storage_flexcache.go
@@ -0,0 +1,161 @@
+package interfaces
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/mitchellh/mapstructure"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/restclient"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/utils"
+)
+
+// StorageFlexcacheGetDataModelONTAP describes the GET record data model using go types for mapping.
+type StorageFlexcacheGetDataModelONTAP struct {
+ Name string
+ SVM svm
+ Aggregates []StorageFlexcacheAggregate `mapstructure:"aggregates"`
+ Origins []StorageFlexcacheOrigin `mapstructure:"origins"`
+ JunctionPath string `mapstructure:"junction_path,omitempty"`
+ Size int `mapstructure:"size,omitempty"`
+ Path string `mapstructure:"path,omitempty"`
+ Guarantee StorageFlexcacheGuarantee `mapstructure:"guarantee,omitempty"`
+ DrCache bool `mapstructure:"dr_cache,omitempty"`
+ GlobalFileLockingEnabled bool `mapstructure:"global_file_locking_enabled,omitempty"`
+ UseTieredAggregate bool `mapstructure:"use_tiered_aggregate,omitempty"`
+ ConstituentsPerAggregate int `mapstructure:"constituents_per_aggregate,omitempty"`
+ UUID string
+}
+
+// StorageFlexcacheResourceModel describes the resource data model.
+type StorageFlexcacheResourceModel struct {
+ Name string `mapstructure:"name,omitempty"`
+ SVM svm `mapstructure:"svm,omitempty"`
+ Origins []map[string]interface{} `mapstructure:"origins,omitempty"`
+ JunctionPath string `mapstructure:"junction_path,omitempty"`
+ Size int `mapstructure:"size,omitempty"`
+ Path string `mapstructure:"path,omitempty"`
+ Guarantee StorageFlexcacheGuarantee `mapstructure:"guarantee,omitempty"`
+ DrCache bool `mapstructure:"dr_cache"`
+ GlobalFileLockingEnabled bool `mapstructure:"global_file_locking_enabled"`
+ UseTieredAggregate bool `mapstructure:"use_tiered_aggregate"`
+ ConstituentsPerAggregate int `mapstructure:"constituents_per_aggregate,omitempty"`
+ Aggregates []map[string]interface{} `mapstructure:"aggregates,omitempty"`
+}
+
+// StorageFlexcacheGuarantee describes the guarantee data model of Guarantee within StorageFlexcacheResourceModel.
+type StorageFlexcacheGuarantee struct {
+ Type string `mapstructure:"type,omitempty"`
+}
+
+// StorageFlexcacheOrigin describes the origin data model of Origin within StorageFlexcacheResourceModel.
+type StorageFlexcacheOrigin struct {
+ Volume StorageFlexcacheVolume `mapstructure:"volume"`
+ SVM StorageFlexcacheSVM `mapstructure:"svm"`
+}
+
+// StorageFlexcacheVolume describes the volume data model of Volume within StorageFlexcacheOrigin.
+type StorageFlexcacheVolume struct {
+ Name string `mapstructure:"name,omitempty"`
+ ID string `mapstructure:"uuid,omitempty"`
+}
+
+// StorageFlexcacheSVM describes the svm data model of SVM within StorageFlexcacheOrigin.
+type StorageFlexcacheSVM struct {
+ Name string `mapstructure:"name,omitempty"`
+ ID string `mapstructure:"uuid,omitempty"`
+}
+
+// StorageFlexcacheAggregate describes the aggregate data model of Aggregate within StorageFlexcacheResourceModel.
+type StorageFlexcacheAggregate struct {
+ Name string `mapstructure:"name,omitempty"`
+ ID string `mapstructure:"uuid,omitempty"`
+}
+
+// StorageFlexcacheDataSourceFilterModel describes the data source data model for queries.
+type StorageFlexcacheDataSourceFilterModel struct {
+ Name string `mapstructure:"name"`
+ SVMName string `mapstructure:"svm.name"`
+}
+
+// GetStorageFlexcacheByName to get flexcache info by name.
+func GetStorageFlexcacheByName(errorHandler *utils.ErrorHandler, r restclient.RestClient, name string, svmName string) (*StorageFlexcacheGetDataModelONTAP, error) {
+ query := r.NewQuery()
+ query.Add("name", name)
+ query.Add("svm.name", svmName)
+ query.Fields([]string{"size", "path", "origins", "guarantee.type", "constituents_per_aggregate", "dr_cache", "global_file_locking_enabled", "use_tiered_aggregate", "aggregates"})
+ statusCode, response, err := r.GetNilOrOneRecord("storage/flexcache/flexcaches", query, nil)
+ if err != nil {
+ return nil, errorHandler.MakeAndReportError("error reading flexcache info", fmt.Sprintf("error on GET storage/flexcache/flexcaches: %s", err))
+ }
+ var dataONTAP *StorageFlexcacheGetDataModelONTAP
+ if err := mapstructure.Decode(response, &dataONTAP); err != nil {
+ return nil, errorHandler.MakeAndReportError("error decoding flexcache info", fmt.Sprintf("error on decode storage/flexcache/flexcaches: %s, statusCode %d, response %#v", err, statusCode, response))
+ }
+ tflog.Debug(errorHandler.Ctx, fmt.Sprintf("Read flexcache source - udata: %#v", dataONTAP))
+ return dataONTAP, nil
+}
+
+// GetStorageFlexcaches to get flexcaches info by filter
+func GetStorageFlexcaches(errorHandler *utils.ErrorHandler, r restclient.RestClient, filter *StorageFlexcacheDataSourceFilterModel) ([]StorageFlexcacheGetDataModelONTAP, error) {
+ api := "storage/flexcache/flexcaches"
+ query := r.NewQuery()
+ query.Fields([]string{"size", "path", "origins", "guarantee.type", "constituents_per_aggregate", "dr_cache", "global_file_locking_enabled", "use_tiered_aggregate", "aggregates"})
+ if filter != nil {
+ var filterMap map[string]interface{}
+ if err := mapstructure.Decode(filter, &filterMap); err != nil {
+ return nil, errorHandler.MakeAndReportError("error encoding storage flexcache filter info", fmt.Sprintf("error on filter %#v: %s", filter, err))
+ }
+ query.SetValues(filterMap)
+ }
+
+ statusCode, response, err := r.GetZeroOrMoreRecords(api, query, nil)
+ if err == nil && response == nil {
+ err = fmt.Errorf("no response for GET %s", api)
+ }
+ if err != nil {
+ return nil, errorHandler.MakeAndReportError("error reading storage flexcache info", fmt.Sprintf("error on GET %s: %s, statusCode %d", api, err, statusCode))
+ }
+
+ var dataONTAP []StorageFlexcacheGetDataModelONTAP
+ for _, info := range response {
+ var record StorageFlexcacheGetDataModelONTAP
+ if err := mapstructure.Decode(info, &record); err != nil {
+ return nil, errorHandler.MakeAndReportError(fmt.Sprintf("failed to decode response from GET %s", api),
+ fmt.Sprintf("error: %s, statusCode %d, info %#v", err, statusCode, info))
+ }
+ dataONTAP = append(dataONTAP, record)
+ }
+ tflog.Debug(errorHandler.Ctx, fmt.Sprintf("Read storage flexcache data source: %#v", dataONTAP))
+ return dataONTAP, nil
+}
+
+// CreateStorageFlexcache creates flexcache.
+// POST API returns result, but does not include the attributes that are not set. Make a separate GET call to get all attributes.
+func CreateStorageFlexcache(errorHandler *utils.ErrorHandler, r restclient.RestClient, data StorageFlexcacheResourceModel) error {
+ var body map[string]interface{}
+ if err := mapstructure.Decode(data, &body); err != nil {
+ return errorHandler.MakeAndReportError("error encoding flexcache body", fmt.Sprintf("error on encoding storage/flexcache/flexcaches body: %s, body: %#v", err, data))
+ }
+ //The use-tiered-aggregate option is only supported when auto provisioning the FlexCache volume
+ if _, ok := body["aggregates"]; ok {
+ delete(body, "use_tiered_aggregate")
+ }
+ query := r.NewQuery()
+ query.Add("return_records", "false")
+ statusCode, _, err := r.CallCreateMethod("storage/flexcache/flexcaches", query, body)
+ if err != nil {
+ return errorHandler.MakeAndReportError("error creating flexcache", fmt.Sprintf("error on POST storage/flexcache/flexcaches: %s, statusCode %d", err, statusCode))
+ }
+
+ return nil
+
+}
+
+// DeleteStorageFlexcache to delete flexcache by id.
+func DeleteStorageFlexcache(errorHandler *utils.ErrorHandler, r restclient.RestClient, id string) error {
+ statusCode, _, err := r.CallDeleteMethod("storage/flexcache/flexcaches/"+id, nil, nil)
+ if err != nil {
+ return errorHandler.MakeAndReportError("error deleting flexcache", fmt.Sprintf("error on DELETE storage/flexcache/flexcaches: %s, statusCode %d", err, statusCode))
+ }
+ return nil
+}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index fac0e77e..c447e956 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -165,6 +165,7 @@ func (p *ONTAPProvider) Resources(ctx context.Context) []func() resource.Resourc
NewSnapmirrorPolicyResource,
NewStorageLunResource,
NewSnapshotPolicyResource,
+ NewStorageFlexcacheRsource,
NewStorageVolumeResource,
NewStorageVolumeSnapshotResource,
NewSVMPeersResource,
@@ -223,6 +224,8 @@ func (p *ONTAPProvider) DataSources(ctx context.Context) []func() datasource.Dat
NewSnapmirrorPoliciesDataSource,
NewStorageAggregateDataSource,
NewStorageAggregatesDataSource,
+ NewStorageFlexcacheDataSource,
+ NewStorageFlexcachesDataSource,
NewStorageLunDataSource,
NewStorageLunsDataSource,
NewStorageVolumeSnapshotDataSource,
diff --git a/internal/provider/storage_flexcache_data_source.go b/internal/provider/storage_flexcache_data_source.go
new file mode 100644
index 00000000..39560cc2
--- /dev/null
+++ b/internal/provider/storage_flexcache_data_source.go
@@ -0,0 +1,332 @@
+package provider
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/interfaces"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/utils"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces
+var _ datasource.DataSource = &StorageFlexcacheDataSource{}
+
+// NewStorageFlexcacheDataSource is a helper function to simplify the provider implementation.
+func NewStorageFlexcacheDataSource() datasource.DataSource {
+ return &StorageFlexcacheDataSource{
+ config: resourceOrDataSourceConfig{
+ name: "storage_flexcache_data_source",
+ },
+ }
+}
+
+// StorageFlexcacheDataSource implements the datasource interface and defines the data model for the resource.
+type StorageFlexcacheDataSource struct {
+ config resourceOrDataSourceConfig
+}
+
+// StorageFlexcacheDataSourceModel describes the resource data model.
+type StorageFlexcacheDataSourceModel struct {
+ CxProfileName types.String `tfsdk:"cx_profile_name"`
+ Name types.String `tfsdk:"name"`
+ SvmName types.String `tfsdk:"svm_name"`
+ Origins types.Set `tfsdk:"origins"`
+ JunctionPath types.String `tfsdk:"junction_path"`
+ Size types.Int64 `tfsdk:"size"`
+ SizeUnit types.String `tfsdk:"size_unit"`
+ ConstituentsPerAggregate types.Int64 `tfsdk:"constituents_per_aggregate"`
+ DrCache types.Bool `tfsdk:"dr_cache"`
+ Guarantee types.Object `tfsdk:"guarantee"`
+ GlobalFileLockingEnabled types.Bool `tfsdk:"global_file_locking_enabled"`
+ UseTieredAggregate types.Bool `tfsdk:"use_tiered_aggregate"`
+ Aggregates types.Set `tfsdk:"aggregates"`
+ ID types.String `tfsdk:"id"`
+}
+
+// Metadata returns the resource type name.
+func (r *StorageFlexcacheDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_" + r.config.name
+}
+
+// Schema defines the schema for the resource.
+func (r *StorageFlexcacheDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ // This description is used by the documentation generator and the language server.
+ MarkdownDescription: "Flexcache resource",
+
+ Attributes: map[string]schema.Attribute{
+ "cx_profile_name": schema.StringAttribute{
+ MarkdownDescription: "Connection profile name",
+ Required: true,
+ },
+ "name": schema.StringAttribute{
+ MarkdownDescription: "The name of the flexcache volume",
+ Required: true,
+ },
+ "svm_name": schema.StringAttribute{
+ MarkdownDescription: "Name of the svm to use",
+ Required: true,
+ },
+ "aggregates": schema.SetNestedAttribute{
+ MarkdownDescription: "",
+ Computed: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Name of the aggregate",
+ Computed: true,
+ },
+ "id": schema.StringAttribute{
+ MarkdownDescription: "UUID of the aggregate",
+ Computed: true,
+ },
+ },
+ },
+ },
+ "origins": schema.SetNestedAttribute{
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "volume": schema.SingleNestedAttribute{
+ MarkdownDescription: "Origin volume",
+ Required: true,
+ Attributes: map[string]schema.Attribute{
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Name of the origin volume",
+ Computed: true,
+ },
+ "id": schema.StringAttribute{
+ MarkdownDescription: "ID of the origin volume",
+ Computed: true,
+ },
+ },
+ },
+ "svm": schema.SingleNestedAttribute{
+ MarkdownDescription: "Origin volume SVM",
+ Required: true,
+ Attributes: map[string]schema.Attribute{
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Name of the origin volume SVM",
+ Computed: true,
+ },
+ "id": schema.StringAttribute{
+ MarkdownDescription: "ID of the origin volume SVM",
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ MarkdownDescription: "Set of the origin volumes",
+ Computed: true,
+ },
+ "junction_path": schema.StringAttribute{
+ MarkdownDescription: "Name of the junction path",
+ Computed: true,
+ },
+ "size": schema.Int64Attribute{
+ MarkdownDescription: "The size of the flexcache volume",
+ Computed: true,
+ },
+ "size_unit": schema.StringAttribute{
+ MarkdownDescription: "The unit used to interpret the size parameter",
+ Computed: true,
+ },
+ "constituents_per_aggregate": schema.Int64Attribute{
+ MarkdownDescription: "The number of constituents per aggregate",
+ Computed: true,
+ },
+ "dr_cache": schema.BoolAttribute{
+ MarkdownDescription: "The state of the dr cache",
+ Computed: true,
+ },
+ "guarantee": schema.SingleNestedAttribute{
+ MarkdownDescription: "The guarantee of the volume",
+ Computed: true,
+ Attributes: map[string]schema.Attribute{
+ "type": schema.StringAttribute{
+ MarkdownDescription: "The type of guarantee",
+ Computed: true,
+ },
+ },
+ },
+ "global_file_locking_enabled": schema.BoolAttribute{
+ MarkdownDescription: "The state of the global file locking",
+ Computed: true,
+ },
+ "use_tiered_aggregate": schema.BoolAttribute{
+ MarkdownDescription: "The state of the use tiered aggregates",
+ Computed: true,
+ },
+ "id": schema.StringAttribute{
+ MarkdownDescription: "The UUID of the flexcache volume",
+ Computed: true,
+ },
+ },
+ }
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *StorageFlexcacheDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+ // Prevent panic if the provider has not been configured.
+ if req.ProviderData == nil {
+ return
+ }
+
+ config, ok := req.ProviderData.(Config)
+ if !ok {
+ resp.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected Config, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+ )
+ }
+ r.config.providerConfig = config
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *StorageFlexcacheDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+ var data StorageFlexcacheDataSourceModel
+
+ // Read Terraform configuration data into the model
+ resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ errorHandler := utils.NewErrorHandler(ctx, &resp.Diagnostics)
+ // we need to defer setting the client until we can read the connection profile name
+ client, err := getRestClient(errorHandler, r.config, data.CxProfileName)
+ if err != nil {
+ // error reporting done inside NewClient
+ return
+ }
+ flexcache, err := interfaces.GetStorageFlexcacheByName(errorHandler, *client, data.Name.ValueString(), data.SvmName.ValueString())
+ if err != nil {
+ return
+ }
+ if flexcache == nil {
+ errorHandler.MakeAndReportError("No flexcache found", fmt.Sprintf("Flexcache %s not found.", data.Name))
+ return
+ }
+
+ size, sizeUnit := interfaces.ByteFormat(int64(flexcache.Size))
+ data.Size = types.Int64Value(int64(size))
+ data.SizeUnit = types.StringValue(sizeUnit)
+ data.JunctionPath = types.StringValue(flexcache.JunctionPath)
+ data.ConstituentsPerAggregate = types.Int64Value(int64(flexcache.ConstituentsPerAggregate))
+ data.DrCache = types.BoolValue(flexcache.DrCache)
+ data.GlobalFileLockingEnabled = types.BoolValue(flexcache.GlobalFileLockingEnabled)
+ data.UseTieredAggregate = types.BoolValue(flexcache.UseTieredAggregate)
+
+ elementTypes := map[string]attr.Type{
+ "type": types.StringType,
+ }
+ elements := map[string]attr.Value{
+ "type": types.StringValue(flexcache.Guarantee.Type),
+ }
+ objectValue, diags := types.ObjectValue(elementTypes, elements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ data.Guarantee = objectValue
+
+ //Origins
+ setElements := []attr.Value{}
+ for _, origin := range flexcache.Origins {
+ nestedElementTypes := map[string]attr.Type{
+ "name": types.StringType,
+ "id": types.StringType,
+ }
+ nestedVolumeElements := map[string]attr.Value{
+ "name": types.StringValue(origin.Volume.Name),
+ "id": types.StringValue(origin.Volume.ID),
+ }
+ nestedSVMElements := map[string]attr.Value{
+ "name": types.StringValue(origin.SVM.Name),
+ "id": types.StringValue(origin.SVM.ID),
+ }
+ originVolumeObjectValue, diags := types.ObjectValue(nestedElementTypes, nestedVolumeElements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ originSVMObjectValue, diags = types.ObjectValue(nestedElementTypes, nestedSVMElements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ elementTypes := map[string]attr.Type{
+ "volume": types.ObjectType{AttrTypes: nestedElementTypes},
+ "svm": types.ObjectType{AttrTypes: nestedElementTypes},
+ }
+ elements := map[string]attr.Value{
+ "volume": originVolumeObjectValue,
+ "svm": originSVMObjectValue,
+ }
+ objectValue, diags := types.ObjectValue(elementTypes, elements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ setElements = append(setElements, objectValue)
+ }
+
+ setValue, diags := types.SetValue(types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "volume": types.ObjectType{AttrTypes: map[string]attr.Type{
+ "name": types.StringType,
+ "id": types.StringType,
+ }},
+ "svm": types.ObjectType{AttrTypes: map[string]attr.Type{
+ "name": types.StringType,
+ "id": types.StringType,
+ }},
+ },
+ }, setElements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ data.Origins = setValue
+
+ //aggregate
+ setElements = []attr.Value{}
+ for _, aggregate := range flexcache.Aggregates {
+ nestedElementTypes := map[string]attr.Type{
+ "name": types.StringType,
+ "id": types.StringType,
+ }
+ nestedElements := map[string]attr.Value{
+ "name": types.StringValue(aggregate.Name),
+ "id": types.StringValue(aggregate.ID),
+ }
+ objectValue, diags := types.ObjectValue(nestedElementTypes, nestedElements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ setElements = append(setElements, objectValue)
+ }
+ setValue, diags = types.SetValue(types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "name": types.StringType,
+ "id": types.StringType,
+ },
+ }, setElements)
+ if diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+ data.Aggregates = setValue
+ data.ID = types.StringValue(flexcache.UUID)
+
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+
+}
diff --git a/internal/provider/storage_flexcache_resource.go b/internal/provider/storage_flexcache_resource.go
new file mode 100644
index 00000000..29f1aad5
--- /dev/null
+++ b/internal/provider/storage_flexcache_resource.go
@@ -0,0 +1,727 @@
+package provider
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/mitchellh/mapstructure"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/interfaces"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/utils"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ resource.Resource = &StorageFlexcacheResource{}
+
+// NewStorageFlexcacheRsource is a helper function to simplify the provider implementation.
+// NOTE(review): "Rsource" is a typo for "Resource", but the function is exported and
+// referenced from the provider's resource registration list, so renaming it here would
+// be a breaking change — keep as-is and fix in a coordinated rename.
+func NewStorageFlexcacheRsource() resource.Resource {
+	return &StorageFlexcacheResource{
+		config: resourceOrDataSourceConfig{
+			name: "storage_flexcache_resource",
+		},
+	}
+}
+
+// StorageFlexcacheResource defines the resource implementation.
+type StorageFlexcacheResource struct {
+	config resourceOrDataSourceConfig
+}
+
+// StorageFlexcacheResourceModel describes the resource data model.
+// Field names map 1:1 to the Terraform schema attributes via the tfsdk tags.
+type StorageFlexcacheResourceModel struct {
+	CxProfileName            types.String `tfsdk:"cx_profile_name"`
+	Name                     types.String `tfsdk:"name"`
+	SvmName                  types.String `tfsdk:"svm_name"`
+	Origins                  types.Set    `tfsdk:"origins"`
+	JunctionPath             types.String `tfsdk:"junction_path"`
+	Size                     types.Int64  `tfsdk:"size"`
+	SizeUnit                 types.String `tfsdk:"size_unit"`
+	ConstituentsPerAggregate types.Int64  `tfsdk:"constituents_per_aggregate"`
+	DrCache                  types.Bool   `tfsdk:"dr_cache"`
+	Guarantee                types.Object `tfsdk:"guarantee"`
+	GlobalFileLockingEnabled types.Bool   `tfsdk:"global_file_locking_enabled"`
+	UseTieredAggregate       types.Bool   `tfsdk:"use_tiered_aggregate"`
+	ID                       types.String `tfsdk:"id"`
+	Aggregates               types.Set    `tfsdk:"aggregates"`
+}
+
+// StorageFlexCacheResourceOrigin describes the origin data model of Origin within StorageFlexcacheResourceModel.
+type StorageFlexCacheResourceOrigin struct {
+	Volume types.Object `tfsdk:"volume"`
+	SVM    types.Object `tfsdk:"svm"`
+}
+
+// StorageFlexCacheResourceOriginVolume describes the volume data model of Volume within StorageFlexCacheResourceOrigin.
+type StorageFlexCacheResourceOriginVolume struct {
+	Name types.String `tfsdk:"name"`
+	ID   types.String `tfsdk:"id"`
+}
+
+// StorageFlexCacheResourceOriginSVM describes the SVM data model of SVM within StorageFlexCacheResourceOrigin.
+type StorageFlexCacheResourceOriginSVM struct {
+	Name types.String `tfsdk:"name"`
+	ID   types.String `tfsdk:"id"`
+}
+
+// StorageFlexCacheResourceOriginAggregate describes the aggregate data model of Aggregate within StorageFlexcacheResourceModel.
+type StorageFlexCacheResourceOriginAggregate struct {
+	Name types.String `tfsdk:"name"`
+	ID   types.String `tfsdk:"id"`
+}
+
+// StorageFlexCacheGuarantee describes the guarantee data model of Guarantee within StorageFlexcacheResourceModel.
+type StorageFlexCacheGuarantee struct {
+	GuaranteeType types.String `tfsdk:"type"`
+}
+
+// StorageFlexCachePrepopulate describes the prepopulate data model of Prepopulate within StorageFlexcacheResourceModel.
+// NOTE(review): not referenced by any visible method of this resource — presumably
+// reserved for future prepopulate (PATCH) support; confirm before removing.
+type StorageFlexCachePrepopulate struct {
+	DirPaths        types.List `tfsdk:"dir_paths"`
+	ExcludeDirPaths types.List `tfsdk:"exclude_dir_paths"`
+	Recurse         types.Bool `tfsdk:"recurse"`
+}
+
+// Metadata returns the resource type name.
+func (r *StorageFlexcacheResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	// Full type name, e.g. "netapp-ontap_storage_flexcache_resource".
+	resp.TypeName = req.ProviderTypeName + "_" + r.config.name
+}
+
+// Configure adds the provider configured client to the resource.
+func (r *StorageFlexcacheResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	// Prevent panic if the provider has not been configured.
+	if req.ProviderData == nil {
+		return
+	}
+
+	config, ok := req.ProviderData.(Config)
+	if !ok {
+		resp.Diagnostics.AddError(
+			"Unexpected Resource Configure Type",
+			fmt.Sprintf("Expected Config, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+		)
+		// Bug fix: without this return the zero-value Config below would be
+		// stored anyway, masking the type mismatch until a later client failure.
+		return
+	}
+	r.config.providerConfig = config
+}
+
+// Schema defines the schema for the resource.
+func (r *StorageFlexcacheResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		// This description is used by the documentation generator and the language server.
+		MarkdownDescription: "Flexcache resource",
+
+		Attributes: map[string]schema.Attribute{
+			"cx_profile_name": schema.StringAttribute{
+				MarkdownDescription: "Connection profile name",
+				Required:            true,
+			},
+			"name": schema.StringAttribute{
+				MarkdownDescription: "The name of the flexcache volume to manage",
+				Required:            true,
+			},
+			"svm_name": schema.StringAttribute{
+				MarkdownDescription: "Name of the svm to use",
+				Required:            true,
+			},
+			// ONTAP may report a "space not enough" or storage-type error if the
+			// aggregates are not set explicitly.
+			"aggregates": schema.SetNestedAttribute{
+				MarkdownDescription: "Set of the aggregates to use",
+				Optional:            true,
+				Computed:            true,
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"name": schema.StringAttribute{
+							MarkdownDescription: "Name of the aggregate",
+							Optional:            true,
+							Computed:            true,
+						},
+						"id": schema.StringAttribute{
+							MarkdownDescription: "UUID of the aggregate",
+							Optional:            true,
+							Computed:            true,
+						},
+					},
+				},
+			},
+			// Each origin names the source volume/SVM pair this cache fronts.
+			// name/id are individually optional so the user may supply either;
+			// both are computed back from ONTAP after create.
+			"origins": schema.SetNestedAttribute{
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"volume": schema.SingleNestedAttribute{
+							MarkdownDescription: "origin volume",
+							Required:            true,
+							Attributes: map[string]schema.Attribute{
+								"name": schema.StringAttribute{
+									MarkdownDescription: "Name of the origin volume",
+									Optional:            true,
+									Computed:            true,
+								},
+								"id": schema.StringAttribute{
+									MarkdownDescription: "ID of the origin volume",
+									Optional:            true,
+									Computed:            true,
+								},
+							},
+						},
+						"svm": schema.SingleNestedAttribute{
+							MarkdownDescription: "origin volume SVM",
+							Required:            true,
+							Attributes: map[string]schema.Attribute{
+								"name": schema.StringAttribute{
+									MarkdownDescription: "Name of the origin volume SVM",
+									Optional:            true,
+									Computed:            true,
+								},
+								"id": schema.StringAttribute{
+									MarkdownDescription: "ID of the origin volume SVM",
+									Optional:            true,
+									Computed:            true,
+								},
+							},
+						},
+					},
+				},
+				MarkdownDescription: "Set of the origin volumes",
+				Required:            true,
+			},
+			"junction_path": schema.StringAttribute{
+				MarkdownDescription: "Name of the junction path",
+				Computed:            true,
+				Optional:            true,
+			},
+			"size": schema.Int64Attribute{
+				MarkdownDescription: "The size of the flexcache volume",
+				Computed:            true,
+				Optional:            true,
+			},
+			"size_unit": schema.StringAttribute{
+				MarkdownDescription: "The unit used to interpret the size parameter",
+				Computed:            true,
+				Optional:            true,
+			},
+			"constituents_per_aggregate": schema.Int64Attribute{
+				MarkdownDescription: "The number of constituents per aggregate",
+				Computed:            true,
+				Optional:            true,
+			},
+			"dr_cache": schema.BoolAttribute{
+				MarkdownDescription: "The state of the dr cache",
+				Computed:            true,
+				Optional:            true,
+			},
+			"guarantee": schema.SingleNestedAttribute{
+				MarkdownDescription: "The guarantee of the volume",
+				Computed:            true,
+				Optional:            true,
+				Attributes: map[string]schema.Attribute{
+					"type": schema.StringAttribute{
+						MarkdownDescription: "The type of guarantee",
+						Computed:            true,
+						Optional:            true,
+					},
+				},
+			},
+			"global_file_locking_enabled": schema.BoolAttribute{
+				MarkdownDescription: "The state of the global file locking",
+				Computed:            true,
+				Optional:            true,
+			},
+			"use_tiered_aggregate": schema.BoolAttribute{
+				MarkdownDescription: "The state of the use tiered aggregates",
+				Computed:            true,
+				Optional:            true,
+			},
+			// UUID assigned by ONTAP; stable across refreshes, hence
+			// UseStateForUnknown to avoid spurious "(known after apply)" diffs.
+			"id": schema.StringAttribute{
+				MarkdownDescription: "The ID of the volume",
+				Computed:            true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.UseStateForUnknown(),
+				},
+			},
+		},
+	}
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *StorageFlexcacheResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	var data StorageFlexcacheResourceModel
+
+	// Read Terraform prior state data into the model.
+	resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	errorHandler := utils.NewErrorHandler(ctx, &resp.Diagnostics)
+	// We need to defer setting the client until we can read the connection profile name.
+	client, err := getRestClient(errorHandler, r.config, data.CxProfileName)
+	if err != nil {
+		// error reporting done inside NewClient
+		return
+	}
+
+	flexcache, err := interfaces.GetStorageFlexcacheByName(errorHandler, *client, data.Name.ValueString(), data.SvmName.ValueString())
+	if err != nil {
+		// error reporting done inside GetStorageFlexcacheByName
+		return
+	}
+	if flexcache == nil {
+		// Bug fix: error title previously misspelled "flexcahce".
+		errorHandler.MakeAndReportError("No flexcache found", fmt.Sprintf("Flexcache %s not found.", data.Name))
+		return
+	}
+
+	// Flatten scalar attributes. ByteFormat converts raw bytes into the
+	// (size, unit) pair the schema exposes.
+	size, sizeUnit := interfaces.ByteFormat(int64(flexcache.Size))
+	data.Size = types.Int64Value(int64(size))
+	data.SizeUnit = types.StringValue(sizeUnit)
+	data.JunctionPath = types.StringValue(flexcache.JunctionPath)
+	data.ConstituentsPerAggregate = types.Int64Value(int64(flexcache.ConstituentsPerAggregate))
+	data.DrCache = types.BoolValue(flexcache.DrCache)
+	data.GlobalFileLockingEnabled = types.BoolValue(flexcache.GlobalFileLockingEnabled)
+	data.UseTieredAggregate = types.BoolValue(flexcache.UseTieredAggregate)
+	data.ID = types.StringValue(flexcache.UUID)
+
+	// guarantee
+	guaranteeTypes := map[string]attr.Type{
+		"type": types.StringType,
+	}
+	objectValue, diags := types.ObjectValue(guaranteeTypes, map[string]attr.Value{
+		"type": types.StringValue(flexcache.Guarantee.Type),
+	})
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	data.Guarantee = objectValue
+
+	// origins: each element is an object holding nested "volume" and "svm"
+	// objects, both shaped as {name, id}. The type maps are loop-invariant,
+	// so build them once and reuse them for the SetValue call as well.
+	nameIDTypes := map[string]attr.Type{
+		"name": types.StringType,
+		"id":   types.StringType,
+	}
+	originTypes := map[string]attr.Type{
+		"volume": types.ObjectType{AttrTypes: nameIDTypes},
+		"svm":    types.ObjectType{AttrTypes: nameIDTypes},
+	}
+	setElements := []attr.Value{}
+	for _, origin := range flexcache.Origins {
+		originVolumeObjectValue, diags := types.ObjectValue(nameIDTypes, map[string]attr.Value{
+			"name": types.StringValue(origin.Volume.Name),
+			"id":   types.StringValue(origin.Volume.ID),
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		// Bug fix: the SVM object's diagnostics were previously discarded
+		// (assigned to _) and the stale volume diags re-checked, so a
+		// conversion error here went unreported.
+		originSVMObjectValue, diags := types.ObjectValue(nameIDTypes, map[string]attr.Value{
+			"name": types.StringValue(origin.SVM.Name),
+			"id":   types.StringValue(origin.SVM.ID),
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		originObjectValue, diags := types.ObjectValue(originTypes, map[string]attr.Value{
+			"volume": originVolumeObjectValue,
+			"svm":    originSVMObjectValue,
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		setElements = append(setElements, originObjectValue)
+	}
+	setValue, diags := types.SetValue(types.ObjectType{AttrTypes: originTypes}, setElements)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	data.Origins = setValue
+
+	// aggregates: flat set of {name, id} objects.
+	setElements = []attr.Value{}
+	for _, aggregate := range flexcache.Aggregates {
+		aggregateObjectValue, diags := types.ObjectValue(nameIDTypes, map[string]attr.Value{
+			"name": types.StringValue(aggregate.Name),
+			"id":   types.StringValue(aggregate.ID),
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		setElements = append(setElements, aggregateObjectValue)
+	}
+	setValue, diags = types.SetValue(types.ObjectType{AttrTypes: nameIDTypes}, setElements)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	data.Aggregates = setValue
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+// Create creates a new flexcache volume and flattens the resulting ONTAP
+// object back into state (computed attributes such as id, size and the
+// resolved origins/aggregates come from the post-create GET).
+func (r *StorageFlexcacheResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+	var data *StorageFlexcacheResourceModel
+	resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	errorHandler := utils.NewErrorHandler(ctx, &resp.Diagnostics)
+	client, err := getRestClient(errorHandler, r.config, data.CxProfileName)
+	if err != nil {
+		// error reporting done inside NewClient
+		return
+	}
+
+	// Build the REST request from the plan. Unknown (computed, unset)
+	// attributes are omitted so ONTAP applies its own defaults.
+	var request interfaces.StorageFlexcacheResourceModel
+	if !data.SizeUnit.IsUnknown() {
+		if _, ok := interfaces.POW2BYTEMAP[data.SizeUnit.ValueString()]; !ok {
+			errorHandler.MakeAndReportError("error creating flexcache", fmt.Sprintf("invalid input for size_unit: %s, required one of: bytes, b, kb, mb, gb, tb, pb, eb, zb, yb", data.SizeUnit.ValueString()))
+			return
+		}
+	}
+	if !data.Size.IsUnknown() {
+		// Size is sent to ONTAP in bytes.
+		request.Size = int(data.Size.ValueInt64()) * interfaces.POW2BYTEMAP[data.SizeUnit.ValueString()]
+	}
+	request.Name = data.Name.ValueString()
+	request.SVM.Name = data.SvmName.ValueString()
+	if !data.JunctionPath.IsUnknown() {
+		request.JunctionPath = data.JunctionPath.ValueString()
+	}
+	if !data.ConstituentsPerAggregate.IsUnknown() {
+		request.ConstituentsPerAggregate = int(data.ConstituentsPerAggregate.ValueInt64())
+	}
+	if !data.DrCache.IsUnknown() {
+		request.DrCache = data.DrCache.ValueBool()
+	}
+	if !data.GlobalFileLockingEnabled.IsUnknown() {
+		request.GlobalFileLockingEnabled = data.GlobalFileLockingEnabled.ValueBool()
+	}
+	if !data.UseTieredAggregate.IsUnknown() {
+		request.UseTieredAggregate = data.UseTieredAggregate.ValueBool()
+	}
+	if !data.Guarantee.IsUnknown() {
+		var Guarantee StorageFlexCacheGuarantee
+		diags := data.Guarantee.As(ctx, &Guarantee, basetypes.ObjectAsOptions{})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		request.Guarantee.Type = Guarantee.GuaranteeType.ValueString()
+	}
+	// Expand the planned origins set into the request payload.
+	if !data.Origins.IsUnknown() {
+		origins := []interfaces.StorageFlexcacheOrigin{}
+
+		elements := make([]types.Object, 0, len(data.Origins.Elements()))
+		diags := data.Origins.ElementsAs(ctx, &elements, false)
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		for _, v := range elements {
+			var origin StorageFlexCacheResourceOrigin
+			diags := v.As(ctx, &origin, basetypes.ObjectAsOptions{})
+			if diags.HasError() {
+				resp.Diagnostics.Append(diags...)
+				return
+			}
+			interfaceOrigin := interfaces.StorageFlexcacheOrigin{}
+			if !origin.Volume.IsUnknown() {
+				var volume StorageFlexCacheResourceOriginVolume
+				diags := origin.Volume.As(ctx, &volume, basetypes.ObjectAsOptions{})
+				if diags.HasError() {
+					resp.Diagnostics.Append(diags...)
+					return
+				}
+				interfaceVolume := interfaces.StorageFlexcacheVolume{}
+				if !volume.Name.IsUnknown() {
+					interfaceVolume.Name = volume.Name.ValueString()
+				}
+				if !volume.ID.IsUnknown() {
+					interfaceVolume.ID = volume.ID.ValueString()
+				}
+				interfaceOrigin.Volume = interfaceVolume
+			}
+			if !origin.SVM.IsUnknown() {
+				var svm StorageFlexCacheResourceOriginSVM
+				diags := origin.SVM.As(ctx, &svm, basetypes.ObjectAsOptions{})
+				if diags.HasError() {
+					resp.Diagnostics.Append(diags...)
+					return
+				}
+				interfaceSVM := interfaces.StorageFlexcacheSVM{}
+				if !svm.Name.IsUnknown() {
+					interfaceSVM.Name = svm.Name.ValueString()
+				}
+				if !svm.ID.IsUnknown() {
+					interfaceSVM.ID = svm.ID.ValueString()
+				}
+				interfaceOrigin.SVM = interfaceSVM
+			}
+			origins = append(origins, interfaceOrigin)
+		}
+
+		err := mapstructure.Decode(origins, &request.Origins)
+		if err != nil {
+			// Bug fix: message previously said "copies" — this is the origins payload.
+			errorHandler.MakeAndReportError("error creating flexcache", fmt.Sprintf("error on encoding origins info: %s, origins %#v", err, origins))
+			return
+		}
+	}
+
+	// Expand the planned aggregates set into the request payload.
+	if !data.Aggregates.IsUnknown() {
+		aggregates := []interfaces.StorageFlexcacheAggregate{}
+
+		elements := make([]types.Object, 0, len(data.Aggregates.Elements()))
+		diags := data.Aggregates.ElementsAs(ctx, &elements, false)
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		for _, v := range elements {
+			var aggregate StorageFlexCacheResourceOriginAggregate
+			diags := v.As(ctx, &aggregate, basetypes.ObjectAsOptions{})
+			if diags.HasError() {
+				resp.Diagnostics.Append(diags...)
+				return
+			}
+			interfaceOriginAggregate := interfaces.StorageFlexcacheAggregate{}
+			if !aggregate.Name.IsUnknown() {
+				interfaceOriginAggregate.Name = aggregate.Name.ValueString()
+			}
+			if !aggregate.ID.IsUnknown() {
+				interfaceOriginAggregate.ID = aggregate.ID.ValueString()
+			}
+			aggregates = append(aggregates, interfaceOriginAggregate)
+		}
+
+		err := mapstructure.Decode(aggregates, &request.Aggregates)
+		if err != nil {
+			// Bug fix: message previously said "copies" — this is the aggregates payload.
+			errorHandler.MakeAndReportError("error creating flexcache", fmt.Sprintf("error on encoding aggregates info: %s, aggregates %#v", err, aggregates))
+			return
+		}
+	}
+
+	err = interfaces.CreateStorageFlexcache(errorHandler, *client, request)
+	if err != nil {
+		// error reporting done inside CreateStorageFlexcache
+		return
+	}
+
+	// Re-read the created object so computed attributes reflect ONTAP's view.
+	flexcache, err := interfaces.GetStorageFlexcacheByName(errorHandler, *client, data.Name.ValueString(), data.SvmName.ValueString())
+	if err != nil {
+		return
+	}
+	if flexcache == nil {
+		errorHandler.MakeAndReportError("No flexcache found", fmt.Sprintf("flexcache %s not found.", data.Name))
+		return
+	}
+	size, sizeUnit := interfaces.ByteFormat(int64(flexcache.Size))
+	data.Size = types.Int64Value(int64(size))
+	data.SizeUnit = types.StringValue(sizeUnit)
+	data.JunctionPath = types.StringValue(flexcache.JunctionPath)
+	data.ConstituentsPerAggregate = types.Int64Value(int64(flexcache.ConstituentsPerAggregate))
+	data.DrCache = types.BoolValue(flexcache.DrCache)
+	data.GlobalFileLockingEnabled = types.BoolValue(flexcache.GlobalFileLockingEnabled)
+	data.UseTieredAggregate = types.BoolValue(flexcache.UseTieredAggregate)
+	data.ID = types.StringValue(flexcache.UUID)
+
+	// guarantee
+	objectValue, diags := types.ObjectValue(map[string]attr.Type{
+		"type": types.StringType,
+	}, map[string]attr.Value{
+		"type": types.StringValue(flexcache.Guarantee.Type),
+	})
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	data.Guarantee = objectValue
+
+	// origins: objects with nested {name, id} "volume" and "svm" objects.
+	// The type maps are loop-invariant; build once and reuse for SetValue.
+	nameIDTypes := map[string]attr.Type{
+		"name": types.StringType,
+		"id":   types.StringType,
+	}
+	originTypes := map[string]attr.Type{
+		"volume": types.ObjectType{AttrTypes: nameIDTypes},
+		"svm":    types.ObjectType{AttrTypes: nameIDTypes},
+	}
+	setElements := []attr.Value{}
+	for _, origin := range flexcache.Origins {
+		originVolumeObjectValue, diags := types.ObjectValue(nameIDTypes, map[string]attr.Value{
+			"name": types.StringValue(origin.Volume.Name),
+			"id":   types.StringValue(origin.Volume.ID),
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		// Bug fix: the SVM object's diagnostics were previously discarded
+		// (assigned to _) and the stale volume diags re-checked, so a
+		// conversion error here went unreported.
+		originSVMObjectValue, diags := types.ObjectValue(nameIDTypes, map[string]attr.Value{
+			"name": types.StringValue(origin.SVM.Name),
+			"id":   types.StringValue(origin.SVM.ID),
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		originObjectValue, diags := types.ObjectValue(originTypes, map[string]attr.Value{
+			"volume": originVolumeObjectValue,
+			"svm":    originSVMObjectValue,
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		// Debug trace of each flattened origin element.
+		log.Printf("objectValue is: %#v", originObjectValue)
+		setElements = append(setElements, originObjectValue)
+	}
+	setValue, diags := types.SetValue(types.ObjectType{AttrTypes: originTypes}, setElements)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	data.Origins = setValue
+
+	// aggregates: flat set of {name, id} objects.
+	setElements = []attr.Value{}
+	for _, aggregate := range flexcache.Aggregates {
+		aggregateObjectValue, diags := types.ObjectValue(nameIDTypes, map[string]attr.Value{
+			"name": types.StringValue(aggregate.Name),
+			"id":   types.StringValue(aggregate.ID),
+		})
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		setElements = append(setElements, aggregateObjectValue)
+	}
+	setValue, diags = types.SetValue(types.ObjectType{AttrTypes: nameIDTypes}, setElements)
+	if diags.HasError() {
+		resp.Diagnostics.Append(diags...)
+		return
+	}
+	data.Aggregates = setValue
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+}
+
+// Delete removes the flexcache volume identified by the UUID stored in state.
+func (r *StorageFlexcacheResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var data *StorageFlexcacheResourceModel
+
+	// Read Terraform prior state data into the model.
+	resp.Diagnostics.Append(req.State.Get(ctx, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	errorHandler := utils.NewErrorHandler(ctx, &resp.Diagnostics)
+	client, err := getRestClient(errorHandler, r.config, data.CxProfileName)
+	if err != nil {
+		// error reporting done inside NewClient
+		return
+	}
+
+	// Bug fix: prior-state values are never "unknown", so the original
+	// IsUnknown()-only guard could not catch a missing UUID; check null too.
+	if data.ID.IsNull() || data.ID.IsUnknown() {
+		errorHandler.MakeAndReportError("UUID is null", "flexcache UUID is null")
+		return
+	}
+
+	err = interfaces.DeleteStorageFlexcache(errorHandler, *client, data.ID.ValueString())
+	if err != nil {
+		// error reporting done inside DeleteStorageFlexcache
+		return
+	}
+}
+
+// Update updates the flexcache volume.
+// In-place update is not supported by this resource, so any change that
+// reaches this method is reported as an error; attribute changes require a
+// replace. (PATCH-only options such as prepopulate — where dir_paths is
+// required and recurse defaults to true — are not exposed here.)
+func (r *StorageFlexcacheResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+
+	errorHandler := utils.NewErrorHandler(ctx, &resp.Diagnostics)
+	errorHandler.MakeAndReportError("Update not available", "No update can be done on flexcache resource.")
+
+}
+
+// ImportState imports the state of the flexcache.
+// The import identifier is "name,svm_name,cx_profile_name"; all three parts
+// must be present and non-empty.
+func (r *StorageFlexcacheResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	tflog.Debug(ctx, fmt.Sprintf("import req an flexcache resource: %#v", req))
+
+	parts := strings.Split(req.ID, ",")
+	valid := len(parts) == 3
+	if valid {
+		for _, part := range parts {
+			if part == "" {
+				valid = false
+				break
+			}
+		}
+	}
+	if !valid {
+		resp.Diagnostics.AddError(
+			"Unexpected Import Identifier",
+			fmt.Sprintf("Expected ID in the format 'name,svm_name,cx_profile_name', got: %s", req.ID),
+		)
+		return
+	}
+
+	// Seed the minimal attributes Read needs to locate the flexcache.
+	for i, attrName := range []string{"name", "svm_name", "cx_profile_name"} {
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(attrName), parts[i])...)
+	}
+}
diff --git a/internal/provider/storage_flexcache_resource_test.go b/internal/provider/storage_flexcache_resource_test.go
new file mode 100644
index 00000000..5e07286a
--- /dev/null
+++ b/internal/provider/storage_flexcache_resource_test.go
@@ -0,0 +1,99 @@
+package provider
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+// TestAccStorageFlexcacheResource is the acceptance test for
+// netapp-ontap_storage_flexcache_resource: it drives two expected-failure
+// creates, a successful create/read, and an import against a live cluster.
+func TestAccStorageFlexcacheResource(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:                 func() { testAccPreCheck(t) },
+		ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			// Test non existant SVM; 2621462 is the ONTAP error code the API
+			// returns in this case.
+			{
+				Config:      testAccStorageFlexcacheResourceConfig("non-existant", "terraformTest4"),
+				ExpectError: regexp.MustCompile("2621462"),
+			},
+			// Test bad volume name; ONTAP rejects it with error code 917888.
+			{
+				Config:      testAccStorageFlexcacheResourceConfig("non-existant", "name-cant-have-dashes"),
+				ExpectError: regexp.MustCompile("917888"),
+			},
+			// Create + read testing.
+			{
+				Config: testAccStorageFlexcacheResourceConfig("acc_test", "accFlexcache"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("netapp-ontap_storage_flexcache_resource.example", "name", "accFlexcache"),
+					resource.TestCheckNoResourceAttr("netapp-ontap_storage_flexcache_resource.example", "volname"),
+				),
+			},
+			// Test importing a resource (ID format: name,svm_name,cx_profile_name).
+			{
+				ResourceName:  "netapp-ontap_storage_flexcache_resource.example",
+				ImportState:   true,
+				ImportStateId: fmt.Sprintf("%s,%s,%s", "accFlexcache", "acc_test", "cluster5"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("netapp-ontap_storage_flexcache_resource.example", "name", "accFlexcache"),
+				),
+			},
+		},
+	})
+}
+
+// testAccStorageFlexcacheResourceConfig renders the provider + resource HCL
+// used by the acceptance test, reading cluster credentials from the
+// TF_ACC_NETAPP_HOST2 / TF_ACC_NETAPP_USER / TF_ACC_NETAPP_PASS2 environment
+// variables when the package-level host/admin/password vars are unset.
+func testAccStorageFlexcacheResourceConfig(svm, volName string) string {
+	if host == "" || admin == "" || password == "" {
+		host = os.Getenv("TF_ACC_NETAPP_HOST2")
+		admin = os.Getenv("TF_ACC_NETAPP_USER")
+		password = os.Getenv("TF_ACC_NETAPP_PASS2")
+	}
+	if host == "" || admin == "" || password == "" {
+		// Bug fix: the message previously named TF_ACC_NETAPP_HOST and
+		// TF_ACC_NETAPP_PASS, but the variables actually read above are the
+		// *2 variants — the old message sent users to the wrong env vars.
+		fmt.Println("TF_ACC_NETAPP_HOST2, TF_ACC_NETAPP_USER, and TF_ACC_NETAPP_PASS2 must be set for acceptance tests")
+		os.Exit(1)
+	}
+	return fmt.Sprintf(`
+provider "netapp-ontap" {
+ connection_profiles = [
+    {
+      name = "cluster5"
+      hostname = "%s"
+      username = "%s"
+      password = "%s"
+      validate_certs = false
+    },
+  ]
+}
+
+resource "netapp-ontap_storage_flexcache_resource" "example" {
+  cx_profile_name = "cluster5"
+  name = "%s"
+  svm_name = "%s"
+
+  origins = [
+	{
+	  volume = {
+		name = "acc_test_storage_flexcache_origin_volume"
+	  },
+	  svm = {
+		name = "acc_test"
+	  }
+	}
+  ]
+  size = 200
+  size_unit = "mb"
+  guarantee = {
+	type = "none"
+  }
+  dr_cache = false
+  global_file_locking_enabled = false
+  aggregates = [
+	{
+	  name = "acc_test"
+	}
+  ]
+}`, host, admin, password, volName, svm)
+}
diff --git a/internal/provider/storage_flexcaches_data_source.go b/internal/provider/storage_flexcaches_data_source.go
new file mode 100644
index 00000000..998e02db
--- /dev/null
+++ b/internal/provider/storage_flexcaches_data_source.go
@@ -0,0 +1,373 @@
+package provider
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/interfaces"
+ "github.com/netapp/terraform-provider-netapp-ontap/internal/utils"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces.
+var _ datasource.DataSource = &StorageFlexcachesDataSource{}
+
+// NewStorageFlexcachesDataSource is a helper function to simplify the provider implementation.
+func NewStorageFlexcachesDataSource() datasource.DataSource {
+	ds := &StorageFlexcachesDataSource{}
+	ds.config = resourceOrDataSourceConfig{name: "storage_flexcaches_data_source"}
+	return ds
+}
+
+// StorageFlexcachesDataSource defines the data source implementation.
+type StorageFlexcachesDataSource struct {
+	config resourceOrDataSourceConfig // connection profile name + provider config, shared with resources
+}
+
+// StorageFlexcachesDataSourceModel describes the data source data model.
+type StorageFlexcachesDataSourceModel struct {
+	CxProfileName types.String `tfsdk:"cx_profile_name"`
+	StorageFlexcaches []StorageFlexcacheDataSourceModel `tfsdk:"storage_flexcaches"` // one entry per matching flexcache volume
+	Filter *StorageFlexcacheDataSourceFilterModel `tfsdk:"filter"` // optional; nil means no filtering
+}
+
+// StorageFlexcacheDataSourceFilterModel describes the data source data model for queries.
+type StorageFlexcacheDataSourceFilterModel struct {
+	Name    types.String `tfsdk:"name"`
+	SVMName types.String `tfsdk:"svm_name"`
+}
+
+// Metadata returns the data source type name.
+func (r *StorageFlexcachesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = fmt.Sprintf("%s_%s", req.ProviderTypeName, r.config.name)
+}
+
+// Schema defines the schema for the data source.
+func (r *StorageFlexcachesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		// This description is used by the documentation generator and the language server.
+		MarkdownDescription: "Flexcaches data source",
+
+		Attributes: map[string]schema.Attribute{
+			"cx_profile_name": schema.StringAttribute{
+				MarkdownDescription: "Connection profile name",
+				Required:            true,
+			},
+			"filter": schema.SingleNestedAttribute{
+				Attributes: map[string]schema.Attribute{
+					"name": schema.StringAttribute{
+						MarkdownDescription: "StorageFlexcache name",
+						Optional:            true,
+					},
+					"svm_name": schema.StringAttribute{
+						MarkdownDescription: "StorageFlexcache svm name",
+						Optional:            true,
+					},
+				},
+				Optional: true,
+			},
+			"storage_flexcaches": schema.ListNestedAttribute{
+				Computed:            true,
+				MarkdownDescription: "List of flexcache volumes matching the filter",
+				NestedObject: schema.NestedAttributeObject{
+					Attributes: map[string]schema.Attribute{
+						"cx_profile_name": schema.StringAttribute{
+							MarkdownDescription: "Connection profile name",
+							Required:            true,
+						},
+						"name": schema.StringAttribute{
+							MarkdownDescription: "The name of the flexcache volume to manage",
+							Required:            true,
+						},
+						"svm_name": schema.StringAttribute{
+							MarkdownDescription: "Name of the svm to use",
+							Required:            true,
+						},
+						"aggregates": schema.SetNestedAttribute{
+							MarkdownDescription: "Set of the aggregates hosting the flexcache volume",
+							Computed:            true,
+							NestedObject: schema.NestedAttributeObject{
+								Attributes: map[string]schema.Attribute{
+									"name": schema.StringAttribute{
+										MarkdownDescription: "Name of the aggregate",
+										Computed:            true,
+									},
+									"id": schema.StringAttribute{
+										MarkdownDescription: "ID of the aggregate",
+										Computed:            true,
+									},
+								},
+							},
+						},
+						"origins": schema.SetNestedAttribute{
+							NestedObject: schema.NestedAttributeObject{
+								Attributes: map[string]schema.Attribute{
+									// All origin data is read back from ONTAP; in a computed
+									// list the nested objects cannot be user-supplied, so
+									// volume/svm are Computed rather than Required.
+									"volume": schema.SingleNestedAttribute{
+										MarkdownDescription: "Origin volume",
+										Computed:            true,
+										Attributes: map[string]schema.Attribute{
+											"name": schema.StringAttribute{
+												MarkdownDescription: "Name of the origin volume",
+												Computed:            true,
+											},
+											"id": schema.StringAttribute{
+												MarkdownDescription: "ID of the origin volume",
+												Computed:            true,
+											},
+										},
+									},
+									"svm": schema.SingleNestedAttribute{
+										MarkdownDescription: "Origin volume SVM",
+										Computed:            true,
+										Attributes: map[string]schema.Attribute{
+											"name": schema.StringAttribute{
+												MarkdownDescription: "Name of the origin volume SVM",
+												Computed:            true,
+											},
+											"id": schema.StringAttribute{
+												MarkdownDescription: "ID of the origin volume SVM",
+												Computed:            true,
+											},
+										},
+									},
+								},
+							},
+							MarkdownDescription: "Set of the origin volumes",
+							Computed:            true,
+						},
+						"junction_path": schema.StringAttribute{
+							MarkdownDescription: "Name of the junction path",
+							Computed:            true,
+						},
+						"size": schema.Int64Attribute{
+							MarkdownDescription: "The size of the flexcache volume",
+							Computed:            true,
+						},
+						"size_unit": schema.StringAttribute{
+							MarkdownDescription: "The unit used to interpret the size parameter",
+							Computed:            true,
+						},
+						"constituents_per_aggregate": schema.Int64Attribute{
+							MarkdownDescription: "The number of constituents per aggregate",
+							Computed:            true,
+						},
+						"dr_cache": schema.BoolAttribute{
+							MarkdownDescription: "The state of the dr cache",
+							Computed:            true,
+						},
+						"guarantee": schema.SingleNestedAttribute{
+							MarkdownDescription: "The guarantee of the volume",
+							Computed:            true,
+							Attributes: map[string]schema.Attribute{
+								"type": schema.StringAttribute{
+									MarkdownDescription: "The type of the guarantee",
+									Computed:            true,
+								},
+							},
+						},
+						"global_file_locking_enabled": schema.BoolAttribute{
+							MarkdownDescription: "The state of the global file locking",
+							Computed:            true,
+						},
+						"use_tiered_aggregate": schema.BoolAttribute{
+							MarkdownDescription: "The state of the use tiered aggregates",
+							Computed:            true,
+						},
+						"id": schema.StringAttribute{
+							MarkdownDescription: "The UUID of the flexcache volume",
+							Computed:            true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// Configure adds the provider configured client to the data source.
+func (r *StorageFlexcachesDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	// Prevent panic if the provider has not been configured.
+	if req.ProviderData == nil {
+		return
+	}
+
+	config, ok := req.ProviderData.(Config)
+	if !ok {
+		resp.Diagnostics.AddError(
+			"Unexpected Data Source Configure Type",
+			fmt.Sprintf("Expected Config, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+		)
+		// Do not fall through: assigning the zero-value config would only
+		// surface as a confusing failure later in Read.
+		return
+	}
+	r.config.providerConfig = config
+}
+
+// Read refreshes the Terraform state with the latest data.
+func (r *StorageFlexcachesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
+	var data StorageFlexcachesDataSourceModel
+
+	// Read Terraform configuration data into the model
+	resp.Diagnostics.Append(req.Config.Get(ctx, &data)...)
+
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	errorHandler := utils.NewErrorHandler(ctx, &resp.Diagnostics)
+	// we need to defer setting the client until we can read the connection profile name
+	client, err := getRestClient(errorHandler, r.config, data.CxProfileName)
+	if err != nil {
+		// error reporting done inside NewClient
+		return
+	}
+	// Translate the optional Terraform filter block into the REST query filter.
+	var filter *interfaces.StorageFlexcacheDataSourceFilterModel
+	if data.Filter != nil {
+		filter = &interfaces.StorageFlexcacheDataSourceFilterModel{
+			Name:    data.Filter.Name.ValueString(),
+			SVMName: data.Filter.SVMName.ValueString(),
+		}
+	}
+
+	restInfo, err := interfaces.GetStorageFlexcaches(errorHandler, *client, filter)
+	if err != nil {
+		// error reporting done inside GetStorageFlexcaches
+		return
+	}
+	data.StorageFlexcaches = make([]StorageFlexcacheDataSourceModel, len(restInfo))
+	for index, record := range restInfo {
+
+		// Convert the raw size into a (value, unit) pair for the schema.
+		vsize, vunits := interfaces.ByteFormat(int64(record.Size))
+
+		data.StorageFlexcaches[index] = StorageFlexcacheDataSourceModel{}
+		data.StorageFlexcaches[index].CxProfileName = data.CxProfileName
+		data.StorageFlexcaches[index].Name = types.StringValue(record.Name)
+		data.StorageFlexcaches[index].SvmName = types.StringValue(record.SVM.Name)
+		data.StorageFlexcaches[index].Size = types.Int64Value(int64(vsize))
+		data.StorageFlexcaches[index].SizeUnit = types.StringValue(vunits)
+		data.StorageFlexcaches[index].JunctionPath = types.StringValue(record.JunctionPath)
+		data.StorageFlexcaches[index].ConstituentsPerAggregate = types.Int64Value(int64(record.ConstituentsPerAggregate))
+		data.StorageFlexcaches[index].DrCache = types.BoolValue(record.DrCache)
+		data.StorageFlexcaches[index].GlobalFileLockingEnabled = types.BoolValue(record.GlobalFileLockingEnabled)
+		data.StorageFlexcaches[index].UseTieredAggregate = types.BoolValue(record.UseTieredAggregate)
+		data.StorageFlexcaches[index].ID = types.StringValue(record.UUID)
+
+		//guarantee
+		elementTypes := map[string]attr.Type{
+			"type": types.StringType,
+		}
+		elements := map[string]attr.Value{
+			"type": types.StringValue(record.Guarantee.Type),
+		}
+		objectValue, diags := types.ObjectValue(elementTypes, elements)
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		data.StorageFlexcaches[index].Guarantee = objectValue
+
+		//origin
+		setElements := []attr.Value{}
+		for _, origin := range record.Origins {
+			nestedElementTypes := map[string]attr.Type{
+				"name": types.StringType,
+				"id":   types.StringType,
+			}
+			nestedVolumeElements := map[string]attr.Value{
+				"name": types.StringValue(origin.Volume.Name),
+				"id":   types.StringValue(origin.Volume.ID),
+			}
+			nestedSVMElements := map[string]attr.Value{
+				"name": types.StringValue(origin.SVM.Name),
+				"id":   types.StringValue(origin.SVM.ID),
+			}
+			originVolumeObjectValue, diags := types.ObjectValue(nestedElementTypes, nestedVolumeElements)
+			if diags.HasError() {
+				resp.Diagnostics.Append(diags...)
+				return
+			}
+			// BUGFIX: capture the diagnostics of this conversion instead of
+			// discarding them and re-checking the stale diags from the volume call.
+			originSVMObjectValue, diags := types.ObjectValue(nestedElementTypes, nestedSVMElements)
+			if diags.HasError() {
+				resp.Diagnostics.Append(diags...)
+				return
+			}
+
+			elementTypes := map[string]attr.Type{
+				"volume": types.ObjectType{AttrTypes: nestedElementTypes},
+				"svm":    types.ObjectType{AttrTypes: nestedElementTypes},
+			}
+			elements := map[string]attr.Value{
+				"volume": originVolumeObjectValue,
+				"svm":    originSVMObjectValue,
+			}
+			objectValue, diags := types.ObjectValue(elementTypes, elements)
+			if diags.HasError() {
+				resp.Diagnostics.Append(diags...)
+				return
+			}
+			log.Printf("objectValue is: %#v", objectValue)
+			setElements = append(setElements, objectValue)
+		}
+
+		setValue, diags := types.SetValue(types.ObjectType{
+			AttrTypes: map[string]attr.Type{
+				"volume": types.ObjectType{AttrTypes: map[string]attr.Type{
+					"name": types.StringType,
+					"id":   types.StringType,
+				}},
+				"svm": types.ObjectType{AttrTypes: map[string]attr.Type{
+					"name": types.StringType,
+					"id":   types.StringType,
+				}},
+			},
+		}, setElements)
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		data.StorageFlexcaches[index].Origins = setValue
+
+		//aggregate
+		setElements = []attr.Value{}
+		log.Printf("flexcache.Aggregates is: %#v", record.Aggregates)
+		for _, aggregate := range record.Aggregates {
+			nestedElementTypes := map[string]attr.Type{
+				"name": types.StringType,
+				"id":   types.StringType,
+			}
+			nestedElements := map[string]attr.Value{
+				"name": types.StringValue(aggregate.Name),
+				"id":   types.StringValue(aggregate.ID),
+			}
+			objectValue, diags := types.ObjectValue(nestedElementTypes, nestedElements)
+			if diags.HasError() {
+				resp.Diagnostics.Append(diags...)
+				return
+			}
+			setElements = append(setElements, objectValue)
+		}
+		setValue, diags = types.SetValue(types.ObjectType{
+			AttrTypes: map[string]attr.Type{
+				"name": types.StringType,
+				"id":   types.StringType,
+			},
+		}, setElements)
+		if diags.HasError() {
+			resp.Diagnostics.Append(diags...)
+			return
+		}
+		data.StorageFlexcaches[index].Aggregates = setValue
+	}
+
+	// Write logs using the tflog package
+	// Documentation: https://terraform.io/plugin/log
+	tflog.Debug(ctx, fmt.Sprintf("read a data source: %#v", data))
+
+	// Save data into Terraform state
+	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+
+}
diff --git a/internal/restclient/rest_client.go b/internal/restclient/rest_client.go
index ec44dfdc..cde73597 100644
--- a/internal/restclient/rest_client.go
+++ b/internal/restclient/rest_client.go
@@ -248,11 +248,17 @@ func (r *RestClient) Wait(uuid string) (int, RestResponse, error) {
} else if job.State == "success" {
return statusCode, RestResponse{}, nil
} else {
- if job.Error.Code != "" {
- errorMessage := fmt.Errorf("fail to get job status. Error code: %s. Message: %s, Target: %s", job.Error.Code, job.Error.Message, job.Error.Target)
- return statusCode, RestResponse{}, errorMessage
+			// if the job struct itself contains message and code, the jobError struct might be empty, and vice versa.
+ if job.Error != (jobError{}) {
+ if job.Error.Code != "" {
+ errorMessage := fmt.Errorf("fail to get job status. Error code: %s. Message: %s, Target: %s", job.Error.Code, job.Error.Message, job.Error.Target)
+ return statusCode, RestResponse{}, errorMessage
+ }
+ return statusCode, RestResponse{}, fmt.Errorf("fail to get job status. Unknown error")
+ }
+ if job.Code != 0 {
+ return statusCode, RestResponse{}, fmt.Errorf("Job UUID %s failed. Error code: %d. Message: %s", uuid, job.Code, job.Message)
}
- return statusCode, RestResponse{}, fmt.Errorf("fail to get job status. Unknown error")
}
time.Sleep(10 * time.Second)
}
@@ -262,8 +268,10 @@ func (r *RestClient) Wait(uuid string) (int, RestResponse, error) {
// Job is ONTAP API job data structure
type Job struct {
- State string
- Error jobError
+ State string
+ Error jobError
+ Code int
+ Message string
}
type jobError struct {
diff --git a/scripts/generate_docs.py b/scripts/generate_docs.py
index 566a05c4..e3335507 100755
--- a/scripts/generate_docs.py
+++ b/scripts/generate_docs.py
@@ -56,7 +56,10 @@
"storage_volume_snapshot_data_source.md",
"storage_volume_resource.md",
"storage_volume_data_source.md",
- "storage_volume_snapshot_resource.md"],
+ "storage_volume_snapshot_resource.md",
+ "storage_flexcache_data_source.md",
+ "storage_flexcaches_data_source.md",
+ "storage_flexcache_resource.md"],
'support': [],
'svm': ["svm_resource.md",
"svm_peers_resource.md",