
Enable cluster deployment with gp3 volume for AWS & Biganimal cloud provider. #6323
yogeshmahajan-1903 committed Jul 7, 2023
1 parent 70d356d commit 471cd66
Showing 10 changed files with 112 additions and 61 deletions.
2 changes: 2 additions & 0 deletions docs/en_US/cloud_edb_biganimal.rst
@@ -66,6 +66,8 @@ details.

* Use the *Volume IOPS* field to specify the storage IOPS. This field is specific to AWS.

* Use the *Disk throughput* field to specify the disk throughput. This field is specific to AWS.


.. image:: images/cloud_biganimal_database.png
:alt: Cloud Deployment Provider
Binary file modified docs/en_US/images/cloud_biganimal_instance.png
52 changes: 35 additions & 17 deletions web/pgacloud/providers/aws.py
@@ -199,23 +199,41 @@ def _create_rds_instance(self, args, security_group):

try:
debug('Creating RDS instance: {}...'.format(args.name))
rds.create_db_instance(DBInstanceIdentifier=args.name,
AllocatedStorage=args.storage_size,
DBName=args.db_name,
Engine='postgres',
Port=args.db_port,
EngineVersion=args.db_version,
StorageType=args.storage_type,
StorageEncrypted=True,
Iops=args.storage_iops,
AutoMinorVersionUpgrade=True,
MultiAZ=bool(args.high_availability),
MasterUsername=args.db_username,
MasterUserPassword=db_password,
DBInstanceClass=args.instance_type,
VpcSecurityGroupIds=[
security_group,
])
if args.storage_type == 'gp3' and args.storage_size > 400:
rds.create_db_instance(DBInstanceIdentifier=args.name,
AllocatedStorage=args.storage_size,
DBName=args.db_name,
Engine='postgres',
Port=args.db_port,
EngineVersion=args.db_version,
StorageType=args.storage_type,
StorageEncrypted=True,
Iops=args.storage_iops,
AutoMinorVersionUpgrade=True,
MultiAZ=bool(args.high_availability),
MasterUsername=args.db_username,
MasterUserPassword=db_password,
DBInstanceClass=args.instance_type,
VpcSecurityGroupIds=[
security_group,
])
else:
rds.create_db_instance(DBInstanceIdentifier=args.name,
AllocatedStorage=args.storage_size,
DBName=args.db_name,
Engine='postgres',
Port=args.db_port,
EngineVersion=args.db_version,
StorageType=args.storage_type,
StorageEncrypted=True,
AutoMinorVersionUpgrade=True,
MultiAZ=bool(args.high_availability),
MasterUsername=args.db_username,
MasterUserPassword=db_password,
DBInstanceClass=args.instance_type,
VpcSecurityGroupIds=[
security_group,
])

except rds.exceptions.DBInstanceAlreadyExistsFault as e:
try:
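The two create_db_instance calls in this hunk differ only in whether Iops is forwarded. As a minimal sketch (not part of the commit; the helper name is hypothetical, the boto3 parameters are the same ones used in the diff above), the shared keyword arguments could be built once:

    def _build_create_db_instance_kwargs(args, db_password, security_group):
        # Shared boto3 RDS create_db_instance arguments, mirroring the diff above.
        kwargs = dict(DBInstanceIdentifier=args.name,
                      AllocatedStorage=args.storage_size,
                      DBName=args.db_name,
                      Engine='postgres',
                      Port=args.db_port,
                      EngineVersion=args.db_version,
                      StorageType=args.storage_type,
                      StorageEncrypted=True,
                      AutoMinorVersionUpgrade=True,
                      MultiAZ=bool(args.high_availability),
                      MasterUsername=args.db_username,
                      MasterUserPassword=db_password,
                      DBInstanceClass=args.instance_type,
                      VpcSecurityGroupIds=[security_group])
        # As in the committed code, a custom IOPS value is only forwarded for
        # gp3 volumes above 400 GiB; smaller gp3 volumes stay on the
        # 3,000 IOPS baseline, so Iops is omitted.
        if args.storage_type == 'gp3' and args.storage_size > 400:
            kwargs['Iops'] = args.storage_iops
        return kwargs

    rds.create_db_instance(**_build_create_db_instance_kwargs(
        args, db_password, security_group))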
6 changes: 5 additions & 1 deletion web/pgacloud/providers/biganimal.py
@@ -77,6 +77,9 @@ def init_args(self, parsers):
parser_create_instance.add_argument('--volume-IOPS',
required=True,
help='storage IOPS')
parser_create_instance.add_argument('--throughput',
required=False,
help='Disk throughput')
parser_create_instance.add_argument('--private-network', required=True,
help='Private or Public Network')
parser_create_instance.add_argument('--public-ip', default='',
@@ -128,7 +131,8 @@ def cmd_create_instance(self, args):
'volumePropertiesId': args.volume_properties,
'volumeTypeId': args.volume_type,
'iops': args.volume_IOPS,
'size': args.volume_size + ' Gi'
'size': args.volume_size + ' Gi',
'throughput': args.throughput
},
'clusterArchitecture': {
'clusterArchitectureId': args.cluster_arch,
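With the new --throughput argument, the volume section of the cluster request built here would look roughly like this for an AWS gp3 cluster (the keys come from the diff above; the values are illustrative only):

    {
        'volumePropertiesId': 'gp3',
        'volumeTypeId': 'gp3',
        'iops': '3000',
        'size': '100 Gi',
        'throughput': '125'
    }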
5 changes: 3 additions & 2 deletions web/pgadmin/misc/cloud/biganimal/__init__.py
@@ -19,8 +19,7 @@
from pgadmin.utils import PgAdminModule
from pgadmin.misc.cloud.utils import _create_server, CloudProcessDesc
from pgadmin.misc.bgprocess.processes import BatchProcess
from pgadmin.utils.ajax import make_json_response,\
internal_server_error, bad_request, success_return
from pgadmin.utils.ajax import make_json_response
from config import root
from pgadmin.utils.constants import MIMETYPE_APP_JSON

@@ -495,6 +494,8 @@ def deploy_on_biganimal(data):
str(data['instance_details'].get('volume_size', None)),
'--volume-IOPS',
str(data['instance_details'].get('volume_IOPS', None)),
'--throughput',
str(data['instance_details'].get('disk_throughput', None)),
'--instance-type',
str(_instance_size),
'--private-network',
2 changes: 1 addition & 1 deletion web/pgadmin/misc/cloud/rds/__init__.py
@@ -102,7 +102,7 @@ def get_db_instances():
)

if not eng_version or eng_version == '' or eng_version == 'undefined':
eng_version = '10.17'
eng_version = '11.16'

rds_obj = pickle.loads(session['aws']['aws_rds_obj'])
res = rds_obj.get_available_db_instance_class(
3 changes: 2 additions & 1 deletion web/pgadmin/misc/cloud/static/js/aws.js
@@ -252,7 +252,8 @@ const getStorageType = (cloudInstanceDetails) => {
let _storage_type = 'General Purpose SSD (gp2)',
_io1 = undefined;

if(cloudInstanceDetails.storage_type == 'gp2') _storage_type = 'General Purpose SSD (gp2)';
if(cloudInstanceDetails.storage_type == 'gp2'){ _storage_type = 'General Purpose SSD (gp2)';}
else if (cloudInstanceDetails.storage_type == 'gp3'){ _storage_type = 'General Purpose SSD (gp3)';}
else if(cloudInstanceDetails.storage_type == 'io1') {
_storage_type = 'Provisioned IOPS SSD (io1)';
_io1 = cloudInstanceDetails.storage_IOPS;
19 changes: 18 additions & 1 deletion web/pgadmin/misc/cloud/static/js/aws_schema.ui.js
@@ -270,6 +270,7 @@ export class StorageSchema extends BaseUISchema {
id: 'storage_type', label: gettext('Storage type'), type: 'select',
mode: ['create'],
options: [
{'label': gettext('General Purpose SSD (gp3)'), 'value': 'gp3'},
{'label': gettext('General Purpose SSD (gp2)'), 'value': 'gp2'},
{'label': gettext('Provisioned IOPS SSD (io1)'), 'value': 'io1'},
{'label': gettext('Magnetic'), 'value': 'standard'}
@@ -281,7 +282,7 @@
if (source[0] !== 'storage_size')
if(state.storage_type === 'io1') {
return {storage_size: 100};
} else if(state.storage_type === 'gp2') {
} else if(state.storage_type === 'gp2' || state.storage_type === 'gp3') {
return {storage_size: 20};
} else {
return {storage_size: 5};
@@ -304,6 +305,22 @@
},
];
}

validate(data, setErrMsg) {
if(!isEmptyString(data.storage_type) && (data.storage_type === 'gp2' || data.storage_type === 'gp3') && !isEmptyString(data.storage_size) && (data.storage_size < 4 || data.storage_size > 65536)) {
setErrMsg('storage_size', gettext('Allocated storage should be between 20 - 65536 GiB.'));
return true;
}
if(!isEmptyString(data.storage_type) && data.storage_type === 'io1' && !isEmptyString(data.storage_size) && (data.storage_size < 100 || data.storage_size > 65536)) {
setErrMsg('storage_size', gettext('Allocated storage should be between 100 - 65536 GiB.'));
return true;
}
if(!isEmptyString(data.storage_type) && data.storage_type === 'standard' && !isEmptyString(data.storage_size) && (data.storage_size < 5 || data.storage_size > 3072)) {
setErrMsg('storage_size', gettext('Allocated storage should be between 5 - 3072 GiB.'));
return true;
}
return false;
}
}

export class HighAvailablity extends BaseUISchema {
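The three allocated-storage range checks added to validate() follow one pattern. As a sketch (not part of the commit; bounds and helper name taken from the error messages above and otherwise assumed), the same rule could be kept in a lookup table:

    # Allocated-storage bounds in GiB per RDS storage type, as stated in the
    # validation messages above (gp2 and gp3 share one range).
    STORAGE_SIZE_BOUNDS = {
        'gp2': (20, 65536),
        'gp3': (20, 65536),
        'io1': (100, 65536),
        'standard': (5, 3072),
    }

    def allocated_storage_error(storage_type, storage_size):
        # Returns an error message when the size is outside the allowed
        # range, or None when it is acceptable.
        low, high = STORAGE_SIZE_BOUNDS[storage_type]
        if not low <= storage_size <= high:
            return 'Allocated storage should be between {} - {} GiB.'.format(low, high)
        return None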
20 changes: 6 additions & 14 deletions web/pgadmin/misc/cloud/static/js/biganimal.js
@@ -242,9 +242,12 @@ export function getBigAnimalSummary(cloud, bigAnimalClusterTypeData, bigAnimalIn

const rows4 = [
createData(gettext('Volume type'), bigAnimalInstanceData.volume_type),
createData(gettext('Volume size'), bigAnimalInstanceData.volume_size),
createData(gettext('Volume IOPS'), bigAnimalInstanceData.volume_IOPS),
createData(gettext('Volume size'), bigAnimalInstanceData.volume_size)
];
if(bigAnimalClusterTypeData.provider.includes('aws')){
rows4.push(createData(gettext('Volume IOPS'), bigAnimalInstanceData.volume_IOPS));
rows4.push(createData(gettext('Disk Throughput'), bigAnimalInstanceData.disk_throughput));
}

const rows5 = [
createData(gettext('Password'), 'xxxxxxx'),
@@ -271,20 +274,9 @@
if (isEmptyString(cloudDetails.name) ||
isEmptyString(cloudDetails.region) || isEmptyString(cloudDetails.instance_type) ||
isEmptyString(cloudDetails.instance_series)|| isEmptyString(cloudDetails.instance_size) ||
isEmptyString(cloudDetails.volume_type) || (cloudDetails.provider != 'aws' && isEmptyString(cloudDetails.volume_properties)) ) {
isEmptyString(cloudDetails.volume_type) || (!cloudDetails.provider.includes('aws') && isEmptyString(cloudDetails.volume_properties)) ) {
isError = true;
}

if (cloudDetails.provider == 'aws') {
if (isEmptyString(cloudDetails.volume_IOPS) || (cloudDetails.volume_IOPS != 'io2' &&
(cloudDetails.volume_size < 1 || cloudDetails.volume_size > 16384)) ||
(cloudDetails.volume_IOPS == 'io2' && (cloudDetails.volume_size < 4 || cloudDetails.volume_size > 16384)) ||
(cloudDetails.volume_IOPS != 'io2' && cloudDetails.volume_IOPS != 3000) ||
(cloudDetails.volume_IOPS == 'io2' && (cloudDetails.volume_IOPS < 100 || cloudDetails.volume_IOPS > 2000))) {
isError = true;
}
}

return isError;
}

64 changes: 40 additions & 24 deletions web/pgadmin/misc/cloud/static/js/biganimal_schema.ui.js
@@ -214,6 +214,7 @@ class BigAnimalVolumeSchema extends BaseUISchema {
volume_properties: '',
volume_size: 4,
volume_IOPS: '',
disk_throughput: 125,
...initValues
});

@@ -228,36 +229,43 @@
return 'oid';
}

validate(data, setErrMsg) {
if (data.provider != CLOUD_PROVIDERS.AWS && isEmptyString(data.volume_properties)) {
setErrMsg('replicas', gettext('Please select volume properties.'));
validate(data, setErrMsg) {
if (!data.provider.includes(CLOUD_PROVIDERS.AWS) && isEmptyString(data.volume_properties)) {
setErrMsg('volume_properties', gettext('Please select volume properties.'));
return true;
}
if (data.provider == CLOUD_PROVIDERS.AWS) {
if (isEmptyString(data.volume_IOPS)) {
setErrMsg('replicas', gettext('Please select volume IOPS.'));
return true;
}
if (data.provider.includes(CLOUD_PROVIDERS.AWS)) {
if (!isEmptyString(data.volume_size)) {
if( data.volume_IOPS != 'io2' && (data.volume_size < 1 || data.volume_size > 16384)) {
setErrMsg('replicas', gettext('Please enter the volume size in the range between 1 tp 16384.'));
if( data.volume_type != 'io2' && (data.volume_size < 1 || data.volume_size > 16384)) {
setErrMsg('volume_size', gettext('Please enter the volume size in the range between 1 to 16384.'));
return true;
}
if (data.volume_IOPS == 'io2' && (data.volume_size < 4 || data.volume_size > 16384)) {
setErrMsg('replicas', gettext('Please enter the volume size in the range between 4 tp 16384.'));
if (data.volume_type == 'io2' && (data.volume_size < 4 || data.volume_size > 16384)) {
setErrMsg('volume_size', gettext('Please enter the volume size in the range between 4 to 16384.'));
return true;
}
}
if (!isEmptyString(data.volume_IOPS)) {
if(data.volume_IOPS != 'io2' && data.volume_IOPS != 3000) {
setErrMsg('replicas', gettext('Please enter the volume IOPS 3000.'));
if (isEmptyString(data.volume_IOPS)) {
setErrMsg('volume_IOPS', gettext('Please enter volume IOPS.'));
return true;
}
else if (!isEmptyString(data.volume_IOPS)) {
if(data.volume_type == 'io2' && (data.volume_IOPS < 100 || data.volume_IOPS > Math.min(data.volume_size*500, 64000))) {
let errMsg = 'Please enter the volume IOPS in the range between 100 to ' + Math.min(data.volume_size*500, 64000) + '.';
setErrMsg('volume_IOPS', gettext(errMsg));
return true;
}
if(data.volume_IOPS == 'io2' && (data.volume_IOPS < 100 || data.volume_IOPS > 2000)) {
setErrMsg('replicas', gettext('Please enter the volume IOPS in the range between 100 tp 2000.'));
if(data.volume_type == 'gp3' && (data.volume_IOPS < 3000 || data.volume_IOPS > Math.min(Math.max(data.volume_size*500, 3000), 16000))) {
let errMsg = 'Please enter the volume IOPS in the range between 3000 to ' + Math.min(Math.max(data.volume_size*500, 3000), 16000) + '.';
setErrMsg('volume_IOPS', gettext(errMsg));
return true;
}
}
if ( data.volume_type === 'gp3' && !isEmptyString(data.disk_throughput) && (data.disk_throughput < 125 || data.disk_throughput > Math.min( ((data.volume_IOPS - 3000)/100*25 + 750) , 1000))) {
let errMsg = 'Please enter the Disk throughput in the range between 125 to ' + Math.min( ((data.volume_IOPS - 3000)/100*25+750) , 1000) + '.';
setErrMsg('disk_throughput', gettext(errMsg));
return true;
}
}
return false;
}
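The gp3 bounds used in the validation above reduce to two expressions: IOPS must fall between 3,000 and min(max(size * 500, 3000), 16000), and disk throughput between 125 MB/s and min((IOPS - 3000) / 100 * 25 + 750, 1000), i.e. 0.25 MB/s per provisioned IOPS. A small sketch of the two ceilings, with hypothetical helper names but the same arithmetic as the checks above:

    def gp3_max_iops(volume_size_gib):
        # 500 IOPS per GiB, never below the 3,000 baseline, capped at 16,000.
        return min(max(volume_size_gib * 500, 3000), 16000)

    def gp3_max_throughput(volume_iops):
        # 0.25 MB/s per IOPS above the 3,000 baseline on top of 750 MB/s,
        # capped at 1,000 MB/s.
        return min((volume_iops - 3000) / 100 * 25 + 750, 1000)

    # Example: a 10 GiB gp3 volume allows 3,000-5,000 IOPS; at the 3,000 IOPS
    # baseline the throughput ceiling is 750 MB/s, and it reaches the
    # 1,000 MB/s cap at 4,000 IOPS.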
@@ -295,36 +303,35 @@
};
},
visible: (state) => {
return state.provider !== CLOUD_PROVIDERS.AWS;
return state.provider === CLOUD_PROVIDERS.AZURE;
},
}, {
id: 'volume_size', label: gettext('Size'), type: 'text',
mode: ['create'], noEmpty: true, deps: ['volume_type'],
depChange: (state, source)=> {
obj.volumeType = state.volume_type;
if (source[0] !== 'volume_size') {
if(state.volume_type == 'io2' || state.provider === CLOUD_PROVIDERS.AZURE) {
if(state.volume_type == 'io2') {
return {volume_size: 4};
} else {
return {volume_size: 1};
}
}
},
visible: (state) => {
return state.provider === CLOUD_PROVIDERS.AWS;
return state.provider && state.provider.includes(CLOUD_PROVIDERS.AWS);
},
helpMessage: obj.volumeType == 'io2' ? gettext('Size (4-16,384 GiB)') : gettext('Size (1-16,384 GiB)')
}, {
id: 'volume_IOPS', label: gettext('IOPS'), type: 'text',
mode: ['create'],
helpMessage: obj.volumeType == 'io2' ? gettext('IOPS (100-2,000)') : gettext('IOPS (3,000-3,000)'),
visible: (state) => {
return state.provider === CLOUD_PROVIDERS.AWS;
return state.provider && state.provider.includes(CLOUD_PROVIDERS.AWS);
}, deps: ['volume_type'],
depChange: (state, source) => {
obj.volumeType = state.volume_type;
if (source[0] !== 'volume_IOPS') {
if (state.provider === CLOUD_PROVIDERS.AWS) {
if (state.provider.includes(CLOUD_PROVIDERS.AWS)) {
if(state.volume_type === 'io2') {
return {volume_IOPS: 100};
} else {
@@ -335,7 +342,15 @@
}
}
},
},
},{
id: 'disk_throughput', label: gettext('Disk throughput'), type: 'text',
mode: ['create'],
helpMessage: gettext('Disk Throughput (125-1,000 MB/s)'),
deps : ['volume_type'],
visible: (state) => {
return state.provider && state.provider.includes(CLOUD_PROVIDERS.AWS) && state.volume_type === 'gp3';
}
}
];
}
}
@@ -426,6 +441,7 @@ class BigAnimalClusterSchema extends BaseUISchema {
region: '',
cloud_type: 'public',
biganimal_public_ip: initValues.hostIP,
disk_throughput: 125,
...initValues
});

