[AzDatalake] Pipelines + cleanup #21298

Merged · 82 commits · Aug 10, 2023

Commits
e8167a2
Enable gocritic during linting (#20715)
jhendrixMSFT Apr 28, 2023
86627ae
Cosmos DB: Enable merge support (#20716)
ealsur Apr 28, 2023
8ac8c6d
[azservicebus, azeventhubs] Stress test and logging improvement (#20710)
richardpark-msft May 1, 2023
9111616
update proxy version (#20712)
azure-sdk May 1, 2023
d6bf190
Return an error when you try to send a message that's too large. (#20…
richardpark-msft May 1, 2023
e2693bd
Changes in test that is failing in pipeline (#20693)
siminsavani-msft May 2, 2023
03f0ac3
[azservicebus, azeventhubs] Treat 'entity full' as a fatal error (#20…
richardpark-msft May 2, 2023
838842d
[azservicebus/azeventhubs] Redirect stderr and stdout to tee (#20726)
richardpark-msft May 3, 2023
20b4dd8
Update changelog with latest features (#20730)
jhendrixMSFT May 3, 2023
745d967
pass along the artifact name so we can override it later (#20732)
azure-sdk May 3, 2023
6dfd0cb
[azeventhubs] Fixing checkpoint store race condition (#20727)
richardpark-msft May 3, 2023
ed7f3c7
Fix azidentity troubleshooting guide link (#20736)
chlowell May 3, 2023
b2cddab
[Release] sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/0.1.0 (…
Alancere May 4, 2023
2a8d96d
add sdk/resourcemanager/postgresql/armpostgresql live test (#20685)
Alancere May 4, 2023
0d22aed
add sdk/resourcemanager/eventhub/armeventhub live test (#20686)
Alancere May 4, 2023
5fa7df4
add sdk/resourcemanager/compute/armcompute live test (#20048)
Alancere May 4, 2023
c005ed6
sdk/resourcemanager/network/armnetwork live test (#20331)
Alancere May 4, 2023
36f766d
add sdk/resourcemanager/cosmos/armcosmos live test (#20705)
Alancere May 4, 2023
9c9d62a
Increment package version after release of azcore (#20740)
azure-sdk May 4, 2023
8bc3450
[azeventhubs] Improperly resetting etag in the checkpoint store (#20737)
richardpark-msft May 4, 2023
e1a6152
Eng workflows sync and branch cleanup additions (#20743)
azure-sdk May 4, 2023
04b463d
[azeventhubs] Latest start position can also be inclusive (ie, get th…
richardpark-msft May 4, 2023
8849196
Update GitHubEventProcessor version and remove pull_request_review pr…
azure-sdk May 5, 2023
27f5ee0
Rename DisableAuthorityValidationAndInstanceDiscovery (#20746)
chlowell May 5, 2023
2eec707
fix (#20707)
Alancere May 6, 2023
22db2d4
AzFile (#20739)
souravgupta-msft May 8, 2023
0cbfd88
azfile: Fixing connection string parsing logic (#20798)
souravgupta-msft May 8, 2023
d54fb08
[azadmin] fix flaky test (#20758)
gracewilcox May 8, 2023
ad8ebd9
Prepare azidentity v1.3.0 for release (#20756)
chlowell May 8, 2023
e2a6f70
Fix broken podman link (#20801)
azure-sdk May 8, 2023
a59d912
[azquery] update doc comments (#20755)
gracewilcox May 8, 2023
bd3b467
Fixed contribution section (#20752)
bobtabor-msft May 8, 2023
132a01a
[azeventhubs,azservicebus] Some API cleanup, renames (#20754)
richardpark-msft May 8, 2023
8db51ca
Add supporting features to enable distributed tracing (#20301) (#20708)
jhendrixMSFT May 9, 2023
4a66b4f
Restore ARM CAE support for azcore beta (#20657)
chlowell May 9, 2023
7d4a3cb
Upgrade to stable azcore (#20808)
chlowell May 9, 2023
068c3be
Increment package version after release of data/azcosmos (#20807)
azure-sdk May 9, 2023
8e0f66e
Updating changelog (#20810)
souravgupta-msft May 9, 2023
ce926c4
Add fake package to azcore (#20711)
jhendrixMSFT May 9, 2023
1a145c5
Updating CHANGELOG.md (#20809)
siminsavani-msft May 9, 2023
90dfc5c
changelog (#20811)
tasherif-msft May 9, 2023
c7eda59
Increment package version after release of storage/azfile (#20813)
azure-sdk May 9, 2023
7fac0b5
Update changelog (azblob) (#20815)
siminsavani-msft May 9, 2023
498a2ef
[azquery] migration guide (#20742)
gracewilcox May 9, 2023
ccb967e
Increment package version after release of monitor/azquery (#20820)
azure-sdk May 9, 2023
f4e6a22
[keyvault] prep for release (#20819)
gracewilcox May 10, 2023
8fd8eda
Merge branch 'main' into feature/azdatalake
tasherif-msft May 11, 2023
c94fa00
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft May 11, 2023
fc0b2b5
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jun 12, 2023
6fb1694
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jun 19, 2023
4f7fe43
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jun 26, 2023
3dac9d0
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jul 4, 2023
a0a861b
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jul 7, 2023
124e27e
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jul 19, 2023
0f5a52c
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jul 24, 2023
81dabb1
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jul 27, 2023
d87e78b
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Jul 31, 2023
1fb3818
ci pipeline
tasherif-msft Jul 31, 2023
0c4178d
code cov
tasherif-msft Jul 31, 2023
5b70b99
test resources file
tasherif-msft Jul 31, 2023
1628f26
Merge remote-tracking branch 'upstream/feature/azdatalake' into featu…
tasherif-msft Aug 1, 2023
ff659b4
Merge branch 'feature/azdatalake' into ci-pipe
tasherif-msft Aug 1, 2023
78ef57d
fix ci
tasherif-msft Aug 1, 2023
32d6eba
ci fix
tasherif-msft Aug 1, 2023
97d524d
ci fix
tasherif-msft Aug 1, 2023
3453e0d
ci fix
tasherif-msft Aug 2, 2023
37484ab
ci
tasherif-msft Aug 2, 2023
5b0257b
generated blobs
tasherif-msft Aug 2, 2023
235c09e
cleanup
tasherif-msft Aug 2, 2023
c013e0c
cleanup
tasherif-msft Aug 2, 2023
9467dcb
fix sas
tasherif-msft Aug 2, 2023
b1a6542
cleanup
tasherif-msft Aug 2, 2023
2cd4d8a
cleanup
tasherif-msft Aug 2, 2023
f413358
cleanup
tasherif-msft Aug 2, 2023
9fd1096
added tests for recursive acls
tasherif-msft Aug 3, 2023
082d401
added one more tests for dir
tasherif-msft Aug 3, 2023
8c74c4a
added more tests
tasherif-msft Aug 8, 2023
0b2c015
added more tests
tasherif-msft Aug 8, 2023
8d9b6e2
tests
tasherif-msft Aug 9, 2023
f3cdb9a
tests
tasherif-msft Aug 9, 2023
9621c44
added more tests for fs and fixed some linting issues
tasherif-msft Aug 10, 2023
2b5b342
cleanup
tasherif-msft Aug 10, 2023
4 changes: 4 additions & 0 deletions eng/config.json
@@ -28,6 +28,10 @@
"Name": "azqueue",
"CoverageGoal": 0.60
},
{
"Name": "azdatalake",
"CoverageGoal": 0.60
},
{
"Name": "azfile",
"CoverageGoal": 0.75
21 changes: 21 additions & 0 deletions sdk/storage/azdatalake/LICENSE.txt
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
2 changes: 1 addition & 1 deletion sdk/storage/azdatalake/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azdatalake",
"Tag": "go/storage/azdatalake_78f150eb1d"
"Tag": "go/storage/azdatalake_ec80a91bf3"
}
28 changes: 28 additions & 0 deletions sdk/storage/azdatalake/ci.yml
@@ -0,0 +1,28 @@
trigger:
branches:
include:
- main
- feature/*
- hotfix/*
- release/*
paths:
include:
- sdk/storage/azdatalake

pr:
branches:
include:
- main
- feature/*
- hotfix/*
- release/*
paths:
include:
- sdk/storage/azdatalake


stages:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: 'storage/azdatalake'
RunLiveTests: true
128 changes: 96 additions & 32 deletions sdk/storage/azdatalake/directory/client.go
@@ -11,11 +11,14 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated_blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/path"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas"
@@ -29,7 +32,7 @@ import (
type ClientOptions base.ClientOptions

// Client represents a URL to the Azure Datalake Storage service.
type Client base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client]
type Client base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client]

// NewClient creates an instance of Client with the specified values.
// - directoryURL - the URL of the directory e.g. https://<account>.dfs.core.windows.net/fs/dir
@@ -128,12 +131,15 @@ func NewClientWithSharedKeyCredential(directoryURL string, cred *SharedKeyCreden
// NewClientFromConnectionString creates an instance of Client with the specified values.
// - connectionString - a connection string for the desired storage account
// - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
func NewClientFromConnectionString(connectionString string, dirPath, fsName string, options *ClientOptions) (*Client, error) {
parsed, err := shared.ParseConnectionString(connectionString)
if err != nil {
return nil, err
}

dirPath = strings.ReplaceAll(dirPath, "\\", "/")
parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, fsName, dirPath)

if parsed.AccountKey != "" && parsed.AccountName != "" {
credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
if err != nil {
@@ -146,31 +152,30 @@ func NewClientFromConnectionString(connectionString string, options *ClientOptio
}

func (d *Client) generatedDirClientWithDFS() *generated.PathClient {
//base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
dirClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(d))
dirClientWithDFS, _, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
return dirClientWithDFS
}

func (d *Client) generatedDirClientWithBlob() *generated.PathClient {
_, dirClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(d))
func (d *Client) generatedDirClientWithBlob() *generated_blob.BlobClient {
_, dirClientWithBlob, _ := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
return dirClientWithBlob
}

func (d *Client) blobClient() *blockblob.Client {
_, _, blobClient := base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(d))
_, _, blobClient := base.InnerClients((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
return blobClient
}

func (d *Client) getClientOptions() *base.ClientOptions {
return base.GetCompositeClientOptions((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(d))
return base.GetCompositeClientOptions((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
}

func (d *Client) sharedKey() *exported.SharedKeyCredential {
return base.SharedKeyComposite((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(d))
return base.SharedKeyComposite((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
}

func (d *Client) identityCredential() *azcore.TokenCredential {
return base.IdentityCredentialComposite((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(d))
return base.IdentityCredentialComposite((*base.CompositeClient[generated.PathClient, generated_blob.BlobClient, blockblob.Client])(d))
}

// DFSURL returns the URL endpoint used by the Client object.
Expand All @@ -183,7 +188,27 @@ func (d *Client) BlobURL() string {
return d.generatedDirClientWithBlob().Endpoint()
}

//TODO: create method to get file client - this will require block blob to have a method to get another block blob
// NewFileClient creates a new file.Client object by concatenating fileName to the end of this Client's URL.
// The new file.Client uses the same request policy pipeline as the Client.
func (d *Client) NewFileClient(fileName string) (*file.Client, error) {
fileName = url.PathEscape(fileName)
fileURL := runtime.JoinPaths(d.DFSURL(), fileName)
newBlobURL, fileURL := shared.GetURLs(fileURL)
var newBlobClient *blockblob.Client
var err error
if d.identityCredential() != nil {
newBlobClient, err = blockblob.NewClient(newBlobURL, *d.identityCredential(), nil)
} else if d.sharedKey() != nil {
blobSharedKey, _ := d.sharedKey().ConvertToBlobSharedKey()
newBlobClient, err = blockblob.NewClientWithSharedKeyCredential(newBlobURL, blobSharedKey, nil)
} else {
newBlobClient, err = blockblob.NewClientWithNoCredential(newBlobURL, nil)
}
if err != nil {
return nil, exported.ConvertToDFSError(err)
}
return (*file.Client)(base.NewPathClient(fileURL, newBlobURL, newBlobClient, d.generatedDirClientWithDFS().InternalClient().WithClientName(shared.FileClient), d.sharedKey(), d.identityCredential(), d.getClientOptions())), nil
}
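
For reference, a minimal usage sketch of the updated connection-string constructor and the new NewFileClient method. The connection string, filesystem, directory, and file names are placeholders, and the sample package name is hypothetical:

package datalakesamples // hypothetical sample package

import (
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
)

// newFileClientFromConnStr builds a directory client from a connection string
// (note the added dirPath and fsName parameters) and derives a file client
// that reuses the directory client's request pipeline.
func newFileClientFromConnStr(connStr string) (*file.Client, error) {
	dirClient, err := directory.NewClientFromConnectionString(connStr, "mydir/sub", "myfilesystem", nil)
	if err != nil {
		return nil, err
	}
	return dirClient.NewFileClient("data.txt")
}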

// Create creates a new directory (dfs1).
func (d *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) {
@@ -260,12 +285,12 @@ func (d *Client) SetAccessControl(ctx context.Context, options *SetAccessControl
return resp, err
}

func (d *Client) setAccessControlHelper(mode generated.PathSetAccessControlRecursiveMode, listOptions *generated.PathClientSetAccessControlRecursiveOptions) *runtime.Pager[SetAccessControlRecursiveResponse] {
return runtime.NewPager(runtime.PagingHandler[SetAccessControlRecursiveResponse]{
More: func(page SetAccessControlRecursiveResponse) bool {
func (d *Client) setAccessControlPager(mode generated.PathSetAccessControlRecursiveMode, listOptions *generated.PathClientSetAccessControlRecursiveOptions) *runtime.Pager[generated.PathClientSetAccessControlRecursiveResponse] {
return runtime.NewPager(runtime.PagingHandler[generated.PathClientSetAccessControlRecursiveResponse]{
More: func(page generated.PathClientSetAccessControlRecursiveResponse) bool {
return page.Continuation != nil && len(*page.Continuation) > 0
},
Fetcher: func(ctx context.Context, page *SetAccessControlRecursiveResponse) (SetAccessControlRecursiveResponse, error) {
Fetcher: func(ctx context.Context, page *generated.PathClientSetAccessControlRecursiveResponse) (generated.PathClientSetAccessControlRecursiveResponse, error) {
var req *policy.Request
var err error
if page == nil {
@@ -277,15 +302,15 @@ func (d *Client) setAccessControlHelper(mode generated.PathSetAccessControlRecur
err = exported.ConvertToDFSError(err)
}
if err != nil {
return SetAccessControlRecursiveResponse{}, err
return generated.PathClientSetAccessControlRecursiveResponse{}, err
}
resp, err := d.generatedDirClientWithDFS().InternalClient().Pipeline().Do(req)
err = exported.ConvertToDFSError(err)
if err != nil {
return SetAccessControlRecursiveResponse{}, err
return generated.PathClientSetAccessControlRecursiveResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return SetAccessControlRecursiveResponse{}, runtime.NewResponseError(resp)
return generated.PathClientSetAccessControlRecursiveResponse{}, runtime.NewResponseError(resp)
}
newResp, err := d.generatedDirClientWithDFS().SetAccessControlRecursiveHandleResponse(resp)
return newResp, exported.ConvertToDFSError(err)
@@ -294,22 +319,61 @@ func (d *Client) setAccessControlHelper(mode generated.PathSetAccessControlRecur

}

// NewSetAccessControlRecursivePager sets the owner, owning group, and permissions for a directory (dfs1).
func (d *Client) NewSetAccessControlRecursivePager(ACL string, options *SetAccessControlRecursiveOptions) *runtime.Pager[SetAccessControlRecursiveResponse] {
func (d *Client) setAccessControlRecursiveHelper(mode generated.PathSetAccessControlRecursiveMode, listOptions *generated.PathClientSetAccessControlRecursiveOptions, options *SetAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
pager := d.setAccessControlPager(mode, listOptions)
counter := *options.MaxBatches
continueOnFailure := listOptions.ForceFlag
totalSuccessfulDirs := int32(0)
totalSuccessfulFiles := int32(0)
totalFailureCount := int32(0)
finalResponse := SetAccessControlRecursiveResponse{
DirectoriesSuccessful: &totalSuccessfulDirs,
FilesSuccessful: &totalSuccessfulFiles,
FailureCount: &totalFailureCount,
FailedEntries: []*ACLFailedEntry{},
}
for pager.More() && counter != 0 {
resp, err := pager.NextPage(context.Background())
if err != nil {
return finalResponse, exported.ConvertToDFSError(err)
}
finalResponse.DirectoriesSuccessful = to.Ptr(*finalResponse.DirectoriesSuccessful + *resp.DirectoriesSuccessful)
finalResponse.FilesSuccessful = to.Ptr(*finalResponse.FilesSuccessful + *resp.FilesSuccessful)
finalResponse.FailureCount = to.Ptr(*finalResponse.FailureCount + *resp.FailureCount)
finalResponse.FailedEntries = append(finalResponse.FailedEntries, resp.FailedEntries...)
counter = counter - 1
if !*continueOnFailure && *resp.FailureCount > 0 {
return finalResponse, exported.ConvertToDFSError(err)
}
}
return finalResponse, nil
}

// SetAccessControlRecursive sets the owner, owning group, and permissions for a directory (dfs1).
func (d *Client) SetAccessControlRecursive(ACL string, options *SetAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
if options == nil {
options = &SetAccessControlRecursiveOptions{}
}
mode, listOptions := options.format(ACL, "set")
return d.setAccessControlHelper(mode, listOptions)
return d.setAccessControlRecursiveHelper(mode, listOptions, options)
}

// NewUpdateAccessControlRecursivePager updates the owner, owning group, and permissions for a directory (dfs1).
func (d *Client) NewUpdateAccessControlRecursivePager(ACL string, options *UpdateAccessControlRecursiveOptions) *runtime.Pager[UpdateAccessControlRecursiveResponse] {
// UpdateAccessControlRecursive updates the owner, owning group, and permissions for a directory (dfs1).
func (d *Client) UpdateAccessControlRecursive(ACL string, options *UpdateAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
if options == nil {
options = &UpdateAccessControlRecursiveOptions{}
}
mode, listOptions := options.format(ACL, "modify")
return d.setAccessControlHelper(mode, listOptions)
return d.setAccessControlRecursiveHelper(mode, listOptions, options)
}

// NewRemoveAccessControlRecursivePager removes the owner, owning group, and permissions for a directory (dfs1).
func (d *Client) NewRemoveAccessControlRecursivePager(ACL string, options *RemoveAccessControlRecursiveOptions) *runtime.Pager[RemoveAccessControlRecursiveResponse] {
// RemoveAccessControlRecursive removes the owner, owning group, and permissions for a directory (dfs1).
func (d *Client) RemoveAccessControlRecursive(ACL string, options *RemoveAccessControlRecursiveOptions) (SetAccessControlRecursiveResponse, error) {
if options == nil {
options = &RemoveAccessControlRecursiveOptions{}
}
mode, listOptions := options.format(ACL, "remove")
return d.setAccessControlHelper(mode, listOptions)
return d.setAccessControlRecursiveHelper(mode, listOptions, options)
}
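
For reference, a sketch of calling the new non-pager SetAccessControlRecursive API. The ACL string and batch count are placeholders, and the MaxBatches field type (*int32) is an assumption based on the int32 counters in this diff:

package datalakesamples // hypothetical sample package

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory"
)

// setACLRecursively applies a POSIX-style ACL to every path under dirClient,
// aggregating at most five continuation batches into a single response.
func setACLRecursively(dirClient *directory.Client) error {
	opts := &directory.SetAccessControlRecursiveOptions{MaxBatches: to.Ptr(int32(5))}
	resp, err := dirClient.SetAccessControlRecursive("user::rwx,group::r-x,other::---", opts)
	if err != nil {
		return err
	}
	fmt.Println("directories updated:", *resp.DirectoriesSuccessful)
	fmt.Println("files updated:", *resp.FilesSuccessful)
	fmt.Println("failures:", *resp.FailureCount)
	return nil
}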

// GetAccessControl gets the owner, owning group, and permissions for a directory (dfs1).
@@ -340,12 +404,12 @@ func (d *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, op

// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (f *Client) GetSASURL(permissions sas.DirectoryPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) {
if f.sharedKey() == nil {
func (d *Client) GetSASURL(permissions sas.DirectoryPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) {
if d.sharedKey() == nil {
return "", datalakeerror.MissingSharedKeyCredential
}

urlParts, err := sas.ParseURL(f.BlobURL())
urlParts, err := sas.ParseURL(d.BlobURL())
err = exported.ConvertToDFSError(err)
if err != nil {
return "", err
@@ -360,14 +424,14 @@ func (f *Client) GetSASURL(permissions sas.DirectoryPermissions, expiry time.Tim
Permissions: permissions.String(),
StartTime: st,
ExpiryTime: expiry.UTC(),
}.SignWithSharedKey(f.sharedKey())
}.SignWithSharedKey(d.sharedKey())

err = exported.ConvertToDFSError(err)
if err != nil {
return "", err
}

endpoint := f.BlobURL() + "?" + qps.Encode()
endpoint := d.BlobURL() + "?" + qps.Encode()

return endpoint, nil
}
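
For reference, a sketch of generating a directory SAS through GetSASURL. This only works when the client was constructed with a shared key credential, and the DirectoryPermissions field names used here are assumptions modeled on the other storage SAS types:

package datalakesamples // hypothetical sample package

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/sas"
)

// directorySASURL returns a read/list SAS URL for the directory, valid for 24 hours.
func directorySASURL(dirClient *directory.Client) (string, error) {
	perms := sas.DirectoryPermissions{Read: true, List: true}
	return dirClient.GetSASURL(perms, time.Now().UTC().Add(24*time.Hour), nil)
}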