diff --git a/src/middlewared/middlewared/alembic/versions/25.04/2024-09-12_23-57_remove_nfs_v4_v3owner.py b/src/middlewared/middlewared/alembic/versions/25.04/2024-09-12_23-57_remove_nfs_v4_v3owner.py new file mode 100644 index 000000000000..4bd289c945d0 --- /dev/null +++ b/src/middlewared/middlewared/alembic/versions/25.04/2024-09-12_23-57_remove_nfs_v4_v3owner.py @@ -0,0 +1,26 @@ +"""Remove deprecated v4_v3owner NFS configuration option + + +Revision ID: 6dedf12c1035 +Revises: 7b618b9ca77d +Create Date: 2024-09-12 23:57:43.814512+00:00 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '6dedf12c1035' +down_revision = '7b618b9ca77d' +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table('services_nfs', schema=None) as batch_op: + batch_op.drop_column('nfs_srv_v4_v3owner') + + +def downgrade(): + pass diff --git a/src/middlewared/middlewared/alert/source/failover.py b/src/middlewared/middlewared/alert/source/failover.py index 0c7101790a34..ffd24a043ec6 100644 --- a/src/middlewared/middlewared/alert/source/failover.py +++ b/src/middlewared/middlewared/alert/source/failover.py @@ -55,9 +55,7 @@ class FailoverAlertSource(AlertSource): run_on_backup_node = False async def check(self): - if not await self.middleware.call('failover.licensed'): - return [] - elif not await self.middleware.call('failover.internal_interfaces'): + if not await self.middleware.call('failover.internal_interfaces'): return [Alert(FailoverInterfaceNotFoundAlertClass)] try: diff --git a/src/middlewared/middlewared/alert/source/failover_disks.py b/src/middlewared/middlewared/alert/source/failover_disks.py index 515bef1c535d..7f0ee17b0014 100644 --- a/src/middlewared/middlewared/alert/source/failover_disks.py +++ b/src/middlewared/middlewared/alert/source/failover_disks.py @@ -31,8 +31,7 @@ class FailoverDisksAlertSource(AlertSource): run_on_backup_node = False async def check(self): - licensed = await self.middleware.call('failover.licensed') - if licensed and (md := await self.middleware.call('failover.mismatch_disks')): + if (md := await self.middleware.call('failover.mismatch_disks')): if md['missing_remote']: return [Alert( DisksAreNotPresentOnStandbyNodeAlertClass, {'serials': ', '.join(md['missing_remote'])} diff --git a/src/middlewared/middlewared/alert/source/failover_interfaces.py b/src/middlewared/middlewared/alert/source/failover_interfaces.py index 28cb313babf4..19d468236675 100644 --- a/src/middlewared/middlewared/alert/source/failover_interfaces.py +++ b/src/middlewared/middlewared/alert/source/failover_interfaces.py @@ -20,8 +20,7 @@ class FailoverCriticalAlertSource(AlertSource): run_on_backup_node = False async def check(self): - licensed = await self.middleware.call('failover.licensed') - if licensed and not await self.middleware.call('interface.query', [('failover_critical', '=', True)]): + if not await self.middleware.call('interface.query', [('failover_critical', '=', True)]): return [Alert(NoCriticalFailoverInterfaceFoundAlertClass)] else: return [] diff --git a/src/middlewared/middlewared/alert/source/failover_nics.py b/src/middlewared/middlewared/alert/source/failover_nics.py index 8aaccf22f743..6f562b575299 100644 --- a/src/middlewared/middlewared/alert/source/failover_nics.py +++ b/src/middlewared/middlewared/alert/source/failover_nics.py @@ -31,8 +31,7 @@ class FailoverNetworkCardsAlertSource(AlertSource): run_on_backup_node = False async def check(self): - licensed = await 
self.middleware.call('failover.licensed') - if licensed and (interfaces := await self.middleware.call('failover.mismatch_nics')): + if (interfaces := await self.middleware.call('failover.mismatch_nics')): if interfaces['missing_remote']: return [Alert( NetworkCardsMismatchOnStandbyNodeAlertClass, {'interfaces': ', '.join(interfaces['missing_remote'])} diff --git a/src/middlewared/middlewared/api/base/types/base.py b/src/middlewared/middlewared/api/base/types/base.py index 33b590e49f45..1ced93e36071 100644 --- a/src/middlewared/middlewared/api/base/types/base.py +++ b/src/middlewared/middlewared/api/base/types/base.py @@ -1,12 +1,14 @@ from typing import Any, Generic, get_args, get_origin, TypeVar -from pydantic import BeforeValidator, Field, GetCoreSchemaHandler, PlainSerializer +from pydantic import AfterValidator, BeforeValidator, Field, GetCoreSchemaHandler, HttpUrl as _HttpUrl, PlainSerializer from pydantic_core import CoreSchema, core_schema, PydanticKnownError from typing_extensions import Annotated from middlewared.utils.lang import undefined -__all__ = ["LongString", "NonEmptyString", "Private", "PRIVATE_VALUE"] +__all__ = ["HttpUrl", "LongString", "NonEmptyString", "Private", "PRIVATE_VALUE"] + +HttpUrl = Annotated[_HttpUrl, AfterValidator(str)] class LongStringWrapper: diff --git a/src/middlewared/middlewared/api/v25_04_0/__init__.py b/src/middlewared/middlewared/api/v25_04_0/__init__.py index 4125f74dc2f2..9e782dc05dc0 100644 --- a/src/middlewared/middlewared/api/v25_04_0/__init__.py +++ b/src/middlewared/middlewared/api/v25_04_0/__init__.py @@ -4,6 +4,7 @@ from .common import * # noqa from .core import * # noqa from .group import * # noqa +from .keychain import * # noqa from .privilege import * # noqa from .user import * # noqa from .vendor import * # noqa diff --git a/src/middlewared/middlewared/api/v25_04_0/keychain.py b/src/middlewared/middlewared/api/v25_04_0/keychain.py new file mode 100644 index 000000000000..ffc4080c0e29 --- /dev/null +++ b/src/middlewared/middlewared/api/v25_04_0/keychain.py @@ -0,0 +1,164 @@ +from typing import Literal + +from pydantic import Field + +from middlewared.api.base import (BaseModel, Excluded, excluded_field, ForUpdateMetaclass, HttpUrl, NonEmptyString, + Private, single_argument_args, single_argument_result) + +__all__ = ["KeychainCredentialEntry", + "KeychainCredentialCreateArgs", "KeychainCredentialCreateResult", + "KeychainCredentialUpdateArgs", "KeychainCredentialUpdateResult", + "KeychainCredentialDeleteArgs", "KeychainCredentialDeleteResult", + "KeychainCredentialUsedByArgs", "KeychainCredentialUsedByResult", + "KeychainCredentialGetOfTypeArgs", "KeychainCredentialGetOfTypeResult", + "KeychainCredentialGenerateSSHKeyPairArgs", "KeychainCredentialGenerateSSHKeyPairResult", + "KeychainCredentialRemoteSSHHostKeyScanArgs", "KeychainCredentialRemoteSSHHostKeyScanResult", + "KeychainCredentialRemoteSSHSemiautomaticSetupArgs", "KeychainCredentialRemoteSSHSemiautomaticSetupResult", + "KeychainCredentialSSHPairArgs", "KeychainCredentialSSHPairResult", + "KeychainCredentialSetupSSHConnectionArgs", "KeychainCredentialSetupSSHConnectionResult"] + + +class KeychainCredentialEntry(BaseModel): + id: int + name: NonEmptyString + type: str + attributes: Private[dict] + + +class KeychainCredentialCreate(KeychainCredentialEntry): + id: Excluded = excluded_field() + + +class KeychainCredentialUpdate(KeychainCredentialCreate, metaclass=ForUpdateMetaclass): + type: Excluded = excluded_field() + + +class KeychainCredentialCreateArgs(BaseModel): + 
keychain_credential_create: KeychainCredentialCreate + + +class KeychainCredentialCreateResult(BaseModel): + result: KeychainCredentialEntry + + +class KeychainCredentialUpdateArgs(BaseModel): + id: int + keychain_credential_update: KeychainCredentialUpdate + + +class KeychainCredentialUpdateResult(BaseModel): + result: KeychainCredentialEntry + + +class KeychainCredentialDeleteOptions(BaseModel): + cascade: bool = False + + +class KeychainCredentialDeleteArgs(BaseModel): + id: int + options: KeychainCredentialDeleteOptions = Field(default=KeychainCredentialDeleteOptions()) + + +class KeychainCredentialDeleteResult(BaseModel): + result: None + + +class KeychainCredentialUsedByArgs(BaseModel): + id: int + + +class UsedKeychainCredential(BaseModel): + title: str + unbind_method: Literal["delete", "disable"] + + +class KeychainCredentialUsedByResult(BaseModel): + result: list[UsedKeychainCredential] + + +class KeychainCredentialGetOfTypeArgs(BaseModel): + id: int + type: str + + +@single_argument_result +class KeychainCredentialGetOfTypeResult(KeychainCredentialEntry): + pass + + +class KeychainCredentialGenerateSSHKeyPairArgs(BaseModel): + pass + + +@single_argument_result +class KeychainCredentialGenerateSSHKeyPairResult(BaseModel): + private_key: str + public_key: str + + +@single_argument_args("keychain_remote_ssh_host_key_scan") +class KeychainCredentialRemoteSSHHostKeyScanArgs(BaseModel): + host: NonEmptyString + port: int = 22 + connect_timeout: int = 10 + + +class KeychainCredentialRemoteSSHHostKeyScanResult(BaseModel): + result: str + + +@single_argument_args("keychain_remote_ssh_semiautomatic_setup") +class KeychainCredentialRemoteSSHSemiautomaticSetupArgs(BaseModel): + name: NonEmptyString + url: HttpUrl + verify_ssl: bool = True + token: Private[str | None] = None + admin_username: str = "root" + password: Private[str | None] = None + otp_token: Private[str | None] = None + username: str = "root" + private_key: Private[int] + connect_timeout: int = 10 + sudo: bool = False + + +class KeychainCredentialRemoteSSHSemiautomaticSetupResult(BaseModel): + result: KeychainCredentialEntry + + +@single_argument_args("keychain_ssh_pair") +class KeychainCredentialSSHPairArgs(BaseModel): + remote_hostname: NonEmptyString + username: str = "root" + public_key: NonEmptyString + + +class KeychainCredentialSSHPairResult(BaseModel): + result: None + + +class KeychainCredentialSetupSSHConnectionPrivateKey(BaseModel): + generate_key: bool = True + existing_key_id: int | None = None + name: NonEmptyString + + +class KeychainCredentialSetupSSHConnectionSemiAutomaticSetup( + KeychainCredentialRemoteSSHSemiautomaticSetupArgs.model_fields["keychain_remote_ssh_semiautomatic_setup"].annotation +): + name: Excluded = excluded_field() + private_key: Excluded = excluded_field() + + +@single_argument_args("setup_ssh_connection") +class KeychainCredentialSetupSSHConnectionArgs(BaseModel): + private_key: KeychainCredentialSetupSSHConnectionPrivateKey | None = None + connection_name: NonEmptyString + setup_type: Literal["SEMI-AUTOMATIC", "MANUAL"] = "MANUAL" + semi_automatic_setup: KeychainCredentialSetupSSHConnectionSemiAutomaticSetup | None = None + manual_setup: dict | None = None + + +@single_argument_result +class KeychainCredentialSetupSSHConnectionResult(KeychainCredentialEntry): + pass diff --git a/src/middlewared/middlewared/etc_files/motd.mako b/src/middlewared/middlewared/etc_files/motd.mako index 89cafc726e4b..d417b297bb63 100644 --- a/src/middlewared/middlewared/etc_files/motd.mako +++ 
b/src/middlewared/middlewared/etc_files/motd.mako @@ -5,8 +5,9 @@ TrueNAS (c) 2009-${buildtime.year}, iXsystems, Inc. All rights reserved. - TrueNAS code is released under the modified BSD license with some - files copyrighted by (c) iXsystems, Inc. + TrueNAS code is released under the LGPLv3 and GPLv3 licenses with some + source files copyrighted by (c) iXsystems, Inc. All other components + are released under their own respective licenses. For more information, documentation, help or support, go here: http://truenas.com diff --git a/src/middlewared/middlewared/logger.py b/src/middlewared/middlewared/logger.py index d3d99d43b06b..2dd6afae5037 100644 --- a/src/middlewared/middlewared/logger.py +++ b/src/middlewared/middlewared/logger.py @@ -39,6 +39,7 @@ logging.getLogger('docker.auth').setLevel(logging.ERROR) logging.TRACE = 6 +APP_LIFECYCLE_LOGFILE = '/var/log/app_lifecycle.log' APP_MIGRATION_LOGFILE = '/var/log/app_migrations.log' DOCKER_IMAGE_LOGFILE = '/var/log/docker_image.log' FAILOVER_LOGFILE = '/var/log/failover.log' @@ -83,6 +84,7 @@ def configure_logging(self, output_option: str): else: for name, filename, log_format in [ (None, LOGFILE, self.log_format), + ('app_lifecycle', APP_LIFECYCLE_LOGFILE, self.log_format), ('app_migrations', APP_MIGRATION_LOGFILE, self.log_format), ('docker_image', DOCKER_IMAGE_LOGFILE, self.log_format), ('failover', FAILOVER_LOGFILE, self.log_format), diff --git a/src/middlewared/middlewared/plugins/account.py b/src/middlewared/middlewared/plugins/account.py index 3d73a83b74ee..de412b41bd28 100644 --- a/src/middlewared/middlewared/plugins/account.py +++ b/src/middlewared/middlewared/plugins/account.py @@ -33,8 +33,6 @@ from middlewared.plugins.idmap_.idmap_constants import ( BASE_SYNTHETIC_DATASTORE_ID, IDType, - SID_LOCAL_USER_PREFIX, - SID_LOCAL_GROUP_PREFIX ) from middlewared.plugins.idmap_ import idmap_winbind from middlewared.plugins.idmap_ import idmap_sss @@ -1022,9 +1020,23 @@ def get_user_obj(self, data): user_obj['grouplist'] = None if data['sid_info']: + sid = None match user_obj['source']: - case 'LOCAL' | 'ACTIVEDIRECTORY': - # winbind provides idmapping for local and AD users + case 'LOCAL': + idmap_ctx = None + db_entry = self.middleware.call_sync('user.query', [ + ['username', '=', user_obj['pw_name']], + ['local', '=', True] + ], {'select': ['sid']}) + if not db_entry: + self.logger.error( + '%s: local user exists on server but does not exist in ' + 'the user account table.', user_obj['pw_name'] + ) + else: + sid = db_entry[0]['sid'] + case 'ACTIVEDIRECTORY': + # winbind provides idmapping for AD users try: idmap_ctx = idmap_winbind.WBClient() except wbclient.WBCError as e: @@ -1047,20 +1059,9 @@ def get_user_obj(self, data): 'id': user_obj['pw_uid'] })['sid'] except MatchNotFound: - if user_obj['source'] == 'LOCAL': - # Local user that doesn't have passdb entry - # we can simply apply default prefix - sid = SID_LOCAL_USER_PREFIX + str(user_obj['pw_uid']) - else: - # This is a more odd situation. The user accout exists - # in IPA but doesn't have a SID assigned to it. - sid = None - else: - # We were unable to establish an idmap client context even - # though we were able to retrieve the user account info. This - # most likely means that we're dealing with a local account and - # winbindd is not running. - sid = None + # This is an unusual situation. The most likely case is that the user account exists + # in IPA but doesn't have a SID assigned to it. All AD users have SIDs. 
+ sid = None user_obj['sid'] = sid else: @@ -1832,7 +1833,8 @@ async def do_delete(self, audit_callback, pk, options): ) group = await self.get_instance(pk) - audit_callback(group['name'] + (' and all its users' if options['delete_users'] else '')) + audit_callback(group['name'] + (' and all users that have this group as their primary group' + if options['delete_users'] else '')) if group['builtin']: raise CallError('A built-in group cannot be deleted.', errno.EACCES) @@ -1932,9 +1934,24 @@ def get_group_obj(self, data): grp_obj['local'] = grp_obj['source'] == 'LOCAL' if data['sid_info']: + sid = None + match grp_obj['source']: - case 'LOCAL' | 'ACTIVEDIRECTORY': - # winbind provides idmapping for local and AD users + case 'LOCAL': + idmap_ctx = None + db_entry = self.middleware.call_sync('group.query', [ + ['group', '=', grp_obj['gr_name']], + ['local', '=', True] + ], {'select': ['sid']}) + if not db_entry: + self.logger.error( + '%s: local group exists on server but does not exist in ' + 'the group account table.', grp_obj['gr_name'] + ) + else: + sid = db_entry[0]['sid'] + case 'ACTIVEDIRECTORY': + # winbind provides idmapping for AD groups try: idmap_ctx = idmap_winbind.WBClient() except wbclient.WBCError as e: @@ -1961,14 +1978,8 @@ def get_group_obj(self, data): 'id': grp_obj['gr_gid'] })['sid'] except MatchNotFound: - if grp_obj['source'] == 'LOCAL': - # Local user that doesn't have groupmap entry - # we can simply apply default prefix - sid = SID_LOCAL_GROUP_PREFIX + str(grp_obj['gr_gid']) - else: - sid = None - else: - sid = None + # This can happen if the group is in IPA and doesn't have a SID assigned + sid = None grp_obj['sid'] = sid else: diff --git a/src/middlewared/middlewared/plugins/acme_protocol.py b/src/middlewared/middlewared/plugins/acme_protocol.py index d95bf19a4a82..72dd9a173e78 100644 --- a/src/middlewared/middlewared/plugins/acme_protocol.py +++ b/src/middlewared/middlewared/plugins/acme_protocol.py @@ -171,7 +171,7 @@ def do_create(self, data): self._config.datastore, { 'uri': register.uri, - 'tos': register.terms_of_service, + 'tos': register.terms_of_service or '', 'new_account_uri': directory.newAccount, 'new_nonce_uri': directory.newNonce, 'new_order_uri': directory.newOrder, diff --git a/src/middlewared/middlewared/plugins/alert.py b/src/middlewared/middlewared/plugins/alert.py index f06a5d8ceddb..1f4232c5d2cd 100644 --- a/src/middlewared/middlewared/plugins/alert.py +++ b/src/middlewared/middlewared/plugins/alert.py @@ -1,7 +1,9 @@ +from dataclasses import dataclass from collections import defaultdict, namedtuple import copy from datetime import datetime, timezone import errno +from itertools import zip_longest import os import textwrap import time @@ -38,17 +40,23 @@ from middlewared.utils.plugins import load_modules, load_classes from middlewared.utils.python import get_middlewared_dir from middlewared.utils.time_utils import utc_now - +from middlewared.plugins.failover_.remote import NETWORK_ERRORS POLICIES = ["IMMEDIATELY", "HOURLY", "DAILY", "NEVER"] DEFAULT_POLICY = "IMMEDIATELY" - ALERT_SOURCES = {} ALERT_SERVICES_FACTORIES = {} +SEND_ALERTS_ON_READY = False AlertSourceLock = namedtuple("AlertSourceLock", ["source_name", "expires_at"]) -SEND_ALERTS_ON_READY = False + +@dataclass(slots=True, frozen=True, kw_only=True) +class AlertFailoverInfo: + this_node: str + other_node: str + run_on_backup_node: bool + run_failover_related: bool class AlertModel(sa.Model): @@ -655,31 +663,80 @@ async def __should_run_or_send_alerts(self): return True - async def 
__run_alerts(self): - master_node = "A" - backup_node = "B" - product_type = await self.middleware.call("alert.product_type") - run_on_backup_node = False - run_failover_related = False - if product_type == "SCALE_ENTERPRISE": - if await self.middleware.call("failover.licensed"): - if await self.middleware.call("failover.node") == "B": - master_node = "B" - backup_node = "A" + async def __get_failover_info(self): + this_node, other_node = "A", "B" + run_on_backup_node = run_failover_related = False + run_failover_related = await self.middleware.call("failover.licensed") + if run_failover_related: + if await self.middleware.call("failover.node") != "A": + this_node, other_node = "B", "A" + + run_failover_related = time.monotonic() > self.blocked_failover_alerts_until + if run_failover_related: try: - remote_version = await self.middleware.call("failover.call_remote", "system.version") - remote_system_state = await self.middleware.call("failover.call_remote", "system.state") - remote_failover_status = await self.middleware.call("failover.call_remote", - "failover.status") + args = ([], {"connect_timeout": 2}) + rem_ver = await self.middleware.call("failover.call_remote", "system.version", *args) + rem_state = await self.middleware.call("failover.call_remote", "system.state", *args) + rem_fstat = await self.middleware.call("failover.call_remote", "failover.status", *args) except Exception: pass else: - if remote_version == await self.middleware.call("system.version"): - if remote_system_state == "READY" and remote_failover_status == "BACKUP": - run_on_backup_node = True + run_on_backup_node = all(( + await self.middleware.call("system.version") == rem_ver, + rem_state == "READY", + rem_fstat == "BACKUP", + )) + + return AlertFailoverInfo( + this_node=this_node, + other_node=other_node, + run_on_backup_node=run_on_backup_node, + run_failover_related=run_failover_related + ) - run_failover_related = time.monotonic() > self.blocked_failover_alerts_until + async def __handle_locked_alert_source(self, name, this_node, other_node): + this_node_alerts, other_node_alerts = [], [] + locked = self.blocked_sources[name] + if locked: + self.logger.debug("Not running alert source %r because it is blocked", name) + for i in filter(lambda x: x.source == name, self.alerts): + if i.node == this_node: + this_node_alerts.append(i) + elif i.node == other_node: + other_node_alerts.append(i) + return this_node_alerts, other_node_alerts, locked + + async def __run_other_node_alert_source(self, name): + keys = ("args", "datetime", "last_occurrence", "dismissed", "mail",) + other_node_alerts = [] + try: + try: + for alert in await self.middleware.call("failover.call_remote", "alert.run_source", [name]): + other_node_alerts.append( + Alert(**dict( + {k: v for k, v in alert.items() if k in keys}, + klass=AlertClass.class_by_name[alert["klass"]], + _source=alert["source"], + _key=alert["key"] + )) + ) + except CallError as e: + if e.errno not in NETWORK_ERRORS + (CallError.EALERTCHECKERUNAVAILABLE,): + raise + except ReserveFDException: + self.logger.debug('Failed to reserve a privileged port') + except Exception as e: + other_node_alerts = [Alert( + AlertSourceRunFailedOnBackupNodeAlertClass, + args={"source_name": name, "traceback": str(e)}, + _source=name + )] + + return other_node_alerts + async def __run_alerts(self): + product_type = await self.middleware.call("alert.product_type") + fi = await self.__get_failover_info() for k, source_lock in list(self.sources_locks.items()): if source_lock.expires_at <= 
time.monotonic(): await self.unblock_source(k) @@ -688,7 +745,7 @@ async def __run_alerts(self): if product_type not in alert_source.products: continue - if alert_source.failover_related and not run_failover_related: + if alert_source.failover_related and not fi.run_failover_related: continue if not alert_source.schedule.should_run(utc_now(), self.alert_source_last_run[alert_source.name]): @@ -696,69 +753,29 @@ async def __run_alerts(self): self.alert_source_last_run[alert_source.name] = utc_now() - alerts_a = [alert - for alert in self.alerts - if alert.node == master_node and alert.source == alert_source.name] - locked = False - if self.blocked_sources[alert_source.name]: - self.logger.debug("Not running alert source %r because it is blocked", alert_source.name) - locked = True - else: + this_node_alerts, other_node_alerts, locked = await self.__handle_locked_alert_source( + alert_source.name, fi.this_node, fi.other_node + ) + if not locked: self.logger.trace("Running alert source: %r", alert_source.name) - try: - alerts_a = await self.__run_source(alert_source.name) + this_node_alerts = await self.__run_source(alert_source.name) except UnavailableException: pass - for alert in alerts_a: - alert.node = master_node - - alerts_b = [] - if run_on_backup_node and alert_source.run_on_backup_node: - try: - alerts_b = [alert - for alert in self.alerts - if alert.node == backup_node and alert.source == alert_source.name] - try: - if not locked: - alerts_b = await self.middleware.call("failover.call_remote", "alert.run_source", - [alert_source.name]) - - alerts_b = [Alert(**dict({k: v for k, v in alert.items() - if k in ["args", "datetime", "last_occurrence", "dismissed", - "mail"]}, - klass=AlertClass.class_by_name[alert["klass"]], - _source=alert["source"], - _key=alert["key"])) - for alert in alerts_b] - except CallError as e: - if e.errno in [errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET, errno.EHOSTDOWN, - errno.ETIMEDOUT, CallError.EALERTCHECKERUNAVAILABLE]: - pass - else: - raise - except ReserveFDException: - self.logger.debug('Failed to reserve a privileged port') - except Exception as e: - alerts_b = [ - Alert(AlertSourceRunFailedOnBackupNodeAlertClass, - args={ - "source_name": alert_source.name, - "traceback": str(e), - }, - _source=alert_source.name) - ] - for alert in alerts_b: - alert.node = backup_node + if fi.run_on_backup_node and alert_source.run_on_backup_node: + other_node_alerts = await self.__run_other_node_alert_source(alert_source.name) - for alert in alerts_a + alerts_b: - self.__handle_alert(alert) + for talert, oalert in zip_longest(this_node_alerts, other_node_alerts, fillvalue=None): + if talert is not None: + talert.node = fi.this_node + self.__handle_alert(talert) + if oalert is not None: + oalert.node = fi.other_node + self.__handle_alert(oalert) self.alerts = ( - [a for a in self.alerts if a.source != alert_source.name] + - alerts_a + - alerts_b + [a for a in self.alerts if a.source != alert_source.name] + this_node_alerts + other_node_alerts ) def __handle_alert(self, alert): diff --git a/src/middlewared/middlewared/plugins/api_key.py b/src/middlewared/middlewared/plugins/api_key.py index 2bf5e007b7cf..af7ebe7fca6d 100644 --- a/src/middlewared/middlewared/plugins/api_key.py +++ b/src/middlewared/middlewared/plugins/api_key.py @@ -6,9 +6,8 @@ from middlewared.api import api_method from middlewared.api.current import ( - ApiKeyCreateArgs, ApiKeyCreateResult, ApiKeyUpdateArgs, - ApiKeyUpdateResult, ApiKeyDeleteArgs, ApiKeyDeleteResult, - HttpVerb + 
ApiKeyEntry, ApiKeyCreateArgs, ApiKeyCreateResult, ApiKeyUpdateArgs, ApiKeyUpdateResult, + ApiKeyDeleteArgs, ApiKeyDeleteResult, HttpVerb, ) from middlewared.service import CRUDService, private, ValidationErrors import middlewared.sqlalchemy as sa @@ -46,6 +45,7 @@ class Config: datastore = "account.api_key" datastore_extend = "api_key.item_extend" cli_namespace = "auth.api_key" + entry = ApiKeyEntry @private async def item_extend(self, item): diff --git a/src/middlewared/middlewared/plugins/apps/compose_utils.py b/src/middlewared/middlewared/plugins/apps/compose_utils.py index 534abfd32937..f8e4279a6e42 100644 --- a/src/middlewared/middlewared/plugins/apps/compose_utils.py +++ b/src/middlewared/middlewared/plugins/apps/compose_utils.py @@ -1,4 +1,5 @@ import itertools +import logging import typing from middlewared.service_exception import CallError @@ -7,6 +8,9 @@ from .utils import PROJECT_PREFIX, run +logger = logging.getLogger('app_lifecycle') + + def compose_action( app_name: str, app_version: str, action: typing.Literal['up', 'down', 'pull'], *, force_recreate: bool = False, remove_orphans: bool = False, remove_images: bool = False, @@ -49,4 +53,7 @@ def compose_action( # TODO: We will likely have a configurable timeout on this end cp = run(['docker', 'compose'] + compose_files + args, timeout=1200) if cp.returncode != 0: - raise CallError(f'Failed {action!r} action for {app_name!r} app: {cp.stderr}') + logger.error('Failed %r action for %r app: %s', action, app_name, cp.stderr) + raise CallError( + f'Failed {action!r} action for {app_name!r} app, please check /var/log/app_lifecycle.log for more details' + ) diff --git a/src/middlewared/middlewared/plugins/apps/crud.py b/src/middlewared/middlewared/plugins/apps/crud.py index a5dd45654c99..26f727a7c90f 100644 --- a/src/middlewared/middlewared/plugins/apps/crud.py +++ b/src/middlewared/middlewared/plugins/apps/crud.py @@ -4,6 +4,8 @@ import shutil import textwrap +from catalog_reader.custom_app import get_version_details + from middlewared.schema import accepts, Bool, Dict, Int, List, Ref, returns, Str from middlewared.service import ( CallError, CRUDService, filterable, InstanceNotFound, job, pass_app, private, ValidationErrors @@ -119,10 +121,15 @@ def query(self, app, filters, options): questions_context = self.middleware.call_sync('catalog.get_normalized_questions_context') for app in apps: - app['version_details'] = self.middleware.call_sync( - 'catalog.app_version_details', get_installed_app_version_path(app['name'], app['version']), - questions_context, - ) + if app['custom_app']: + version_details = get_version_details() + else: + version_details = self.middleware.call_sync( + 'catalog.app_version_details', get_installed_app_version_path(app['name'], app['version']), + questions_context, + ) + + app['version_details'] = version_details return filter_list(apps, filters, options) @@ -337,13 +344,13 @@ def delete_internal(self, job, app_name, app_config, options): app_name, app_config['version'], 'down', remove_orphans=True, remove_volumes=True, remove_images=options['remove_images'], ) - try: - job.set_progress(80, 'Cleaning up resources') - shutil.rmtree(get_installed_app_path(app_name)) - if options['remove_ix_volumes'] and (apps_volume_ds := self.get_app_volume_ds(app_name)): - self.middleware.call_sync('zfs.dataset.delete', apps_volume_ds, {'recursive': True}) - finally: - self.middleware.call_sync('app.metadata.generate').wait_sync(raise_error=True) + # Remove app from metadata first as if someone tries to query filesystem 
info of the app + # where the app resources have been nuked from filesystem, it will error out + self.middleware.call_sync('app.metadata.generate', [app_name]).wait_sync(raise_error=True) + job.set_progress(80, 'Cleaning up resources') + shutil.rmtree(get_installed_app_path(app_name)) + if options['remove_ix_volumes'] and (apps_volume_ds := self.get_app_volume_ds(app_name)): + self.middleware.call_sync('zfs.dataset.delete', apps_volume_ds, {'recursive': True}) if options.get('send_event', True): self.middleware.send_event('app.query', 'REMOVED', id=app_name) diff --git a/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py b/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py index ac177f88691f..d6ea09d1e7ca 100644 --- a/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py +++ b/src/middlewared/middlewared/plugins/apps/ix_apps/lifecycle.py @@ -5,6 +5,7 @@ import yaml from middlewared.service_exception import CallError +from middlewared.utils.io import write_if_changed from .path import ( get_installed_app_config_path, get_installed_app_rendered_dir_path, get_installed_app_version_path, @@ -18,7 +19,8 @@ def get_rendered_template_config_of_app(app_name: str, version: str) -> dict: for rendered_file in get_rendered_templates_of_app(app_name, version): with contextlib.suppress(FileNotFoundError, yaml.YAMLError): with open(rendered_file, 'r') as f: - rendered_config.update(yaml.safe_load(f.read())) + if (data := yaml.safe_load(f)) is not None: + rendered_config.update(data) return rendered_config @@ -32,13 +34,13 @@ def get_rendered_templates_of_app(app_name: str, version: str) -> list[str]: def write_new_app_config(app_name: str, version: str, values: dict[str, typing.Any]) -> None: - with open(get_installed_app_config_path(app_name, version), 'w') as f: - f.write(yaml.safe_dump(values)) + app_config_path = get_installed_app_config_path(app_name, version) + write_if_changed(app_config_path, yaml.safe_dump(values), perms=0o600, raise_error=False) def get_current_app_config(app_name: str, version: str) -> dict: with open(get_installed_app_config_path(app_name, version), 'r') as f: - return yaml.safe_load(f) + return yaml.safe_load(f) or {} def render_compose_templates(app_version_path: str, values_file_path: str): @@ -51,8 +53,8 @@ def render_compose_templates(app_version_path: str, values_file_path: str): def update_app_config(app_name: str, version: str, values: dict[str, typing.Any], custom_app: bool = False) -> None: write_new_app_config(app_name, version, values) if custom_app: - with open(get_installed_custom_app_compose_file(app_name, version), 'w') as f: - f.write(yaml.safe_dump(values)) + compose_file_path = get_installed_custom_app_compose_file(app_name, version) + write_if_changed(compose_file_path, yaml.safe_dump(values), perms=0o600, raise_error=False) else: render_compose_templates( get_installed_app_version_path(app_name, version), get_installed_app_config_path(app_name, version) diff --git a/src/middlewared/middlewared/plugins/apps/ix_apps/metadata.py b/src/middlewared/middlewared/plugins/apps/ix_apps/metadata.py index 45be0da61f01..64ee57edbff8 100644 --- a/src/middlewared/middlewared/plugins/apps/ix_apps/metadata.py +++ b/src/middlewared/middlewared/plugins/apps/ix_apps/metadata.py @@ -3,63 +3,66 @@ import yaml +from middlewared.utils.io import write_if_changed from .path import get_collective_config_path, get_collective_metadata_path, get_installed_app_metadata_path from .portals import get_portals_and_app_notes -def get_app_metadata(app_name: str) 
-> dict[str, typing.Any]: +def _load_app_yaml(yaml_path: str) -> dict[str, typing.Any]: + """ wrapper around yaml.safe_load that ensures a dict is always returned """ try: - with open(get_installed_app_metadata_path(app_name), 'r') as f: - return yaml.safe_load(f) + with open(yaml_path, 'r') as f: + if (data := yaml.safe_load(f)) is None: + # yaml.safe_load may return None if file empty + return {} + + return data except (FileNotFoundError, yaml.YAMLError): return {} +def get_app_metadata(app_name: str) -> dict[str, typing.Any]: + return _load_app_yaml(get_installed_app_metadata_path(app_name)) + + def update_app_metadata( app_name: str, app_version_details: dict, migrated: bool | None = None, custom_app: bool = False, ): migrated = get_app_metadata(app_name).get('migrated', False) if migrated is None else migrated - with open(get_installed_app_metadata_path(app_name), 'w') as f: - f.write(yaml.safe_dump({ + write_if_changed(get_installed_app_metadata_path(app_name), yaml.safe_dump({ 'metadata': app_version_details['app_metadata'], 'migrated': migrated, 'custom_app': custom_app, **{k: app_version_details[k] for k in ('version', 'human_version')}, **get_portals_and_app_notes(app_name, app_version_details['version']), # TODO: We should not try to get portals for custom apps for now - })) + }), perms=0o600, raise_error=False) def update_app_metadata_for_portals(app_name: str, version: str): # This should be called after config of app has been updated as that will render compose files app_metadata = get_app_metadata(app_name) - with open(get_installed_app_metadata_path(app_name), 'w') as f: - f.write(yaml.safe_dump({ - **app_metadata, - **get_portals_and_app_notes(app_name, version), - })) + + # Using write_if_changed ensures atomicity of the write via writing to a temporary + # file then renaming over existing one. 
+ write_if_changed(get_installed_app_metadata_path(app_name), yaml.safe_dump({ + **app_metadata, + **get_portals_and_app_notes(app_name, version), + }), perms=0o600, raise_error=False) def get_collective_config() -> dict[str, dict]: - try: - with open(get_collective_config_path(), 'r') as f: - return yaml.safe_load(f.read()) - except FileNotFoundError: - return {} + return _load_app_yaml(get_collective_config_path()) def get_collective_metadata() -> dict[str, dict]: - try: - with open(get_collective_metadata_path(), 'r') as f: - return yaml.safe_load(f.read()) - except FileNotFoundError: - return {} + return _load_app_yaml(get_collective_metadata_path()) def update_app_yaml_for_last_update(version_path: str, last_update: str): - with open(os.path.join(version_path, 'app.yaml'), 'r') as f: - app_config = yaml.safe_load(f.read()) + app_yaml_path = os.path.join(version_path, 'app.yaml') + + app_config = _load_app_yaml(app_yaml_path) + app_config['last_update'] = last_update - with open(os.path.join(version_path, 'app.yaml'), 'w') as f: - app_config['last_update'] = last_update - f.write(yaml.safe_dump(app_config)) + write_if_changed(app_yaml_path, yaml.safe_dump(app_config), perms=0o600, raise_error=False) diff --git a/src/middlewared/middlewared/plugins/apps/ix_apps/setup.py b/src/middlewared/middlewared/plugins/apps/ix_apps/setup.py index b7f91981a9c7..489755e30c50 100644 --- a/src/middlewared/middlewared/plugins/apps/ix_apps/setup.py +++ b/src/middlewared/middlewared/plugins/apps/ix_apps/setup.py @@ -3,6 +3,7 @@ import textwrap import yaml +from middlewared.utils.io import write_if_changed from .metadata import update_app_yaml_for_last_update from .path import get_app_parent_config_path, get_installed_app_version_path @@ -20,9 +21,14 @@ def setup_install_app_dir(app_name: str, app_version_details: dict, custom_app: This is a custom app where user can use his/her own docker compose file for deploying services. 
''')) + f.flush() - with open(os.path.join(destination, 'app.yaml'), 'w') as f: - f.write(yaml.safe_dump(app_version_details['app_metadata'])) + write_if_changed( + os.path.join(destination, 'app.yaml'), + yaml.safe_dump(app_version_details['app_metadata']), + perms=0o600, + raise_error=False + ) else: shutil.copytree(app_version_details['location'], destination) diff --git a/src/middlewared/middlewared/plugins/apps/metadata.py b/src/middlewared/middlewared/plugins/apps/metadata.py index 83fa924984ae..a5a736bb2d9c 100644 --- a/src/middlewared/middlewared/plugins/apps/metadata.py +++ b/src/middlewared/middlewared/plugins/apps/metadata.py @@ -15,11 +15,12 @@ class Config: private = True @job(lock='app_metadata_generate', lock_queue_size=1) - def generate(self, job): + def generate(self, job, blacklisted_apps=None): config = {} metadata = {} + blacklisted_apps = blacklisted_apps or [] with os.scandir(get_app_parent_config_path()) as scan: - for entry in filter(lambda e: e.is_dir(), scan): + for entry in filter(lambda e: e.name not in blacklisted_apps and e.is_dir(), scan): if not (app_metadata := get_app_metadata(entry.name)): # The app is malformed or something is seriously wrong with it continue diff --git a/src/middlewared/middlewared/plugins/apps/stats_util.py b/src/middlewared/middlewared/plugins/apps/stats_util.py index 4aca7828b5ff..0a566170da3d 100644 --- a/src/middlewared/middlewared/plugins/apps/stats_util.py +++ b/src/middlewared/middlewared/plugins/apps/stats_util.py @@ -1,7 +1,8 @@ +from middlewared.utils.cpu import cpu_info + from .ix_apps.metadata import get_collective_metadata from .ix_apps.utils import get_app_name_from_project_name - NANO_SECOND = 1000000000 @@ -27,7 +28,14 @@ def normalize_projects_stats(all_projects_stats: dict, old_stats: dict, interval # 2. Normalize this delta over the given time interval by dividing by (interval * NANO_SECOND). # 3. Multiply by 100 to convert to percentage. 
cpu_delta = data['cpu_usage'] - old_stats[project]['cpu_usage'] - normalized_data['cpu_usage'] = (cpu_delta / (interval * NANO_SECOND)) * 100 + if cpu_delta >= 0: + normalized_data['cpu_usage'] = (cpu_delta / (interval * NANO_SECOND * cpu_info()['core_count'])) * 100 + else: + # This will happen when there were multiple containers and an app is being stopped: + # the old stats contain cpu usage times of multiple containers, while the current stats + # only contain the stats of the containers which are still running, which means the collective + # current cpu usage time will obviously be lower than what the old stats contain + normalized_data['cpu_usage'] = 0 networks = [] for net_name, network_data in data['networks'].items(): diff --git a/src/middlewared/middlewared/plugins/audit/audit.py b/src/middlewared/middlewared/plugins/audit/audit.py index 07a029f55bd5..5577ec1a254b 100644 --- a/src/middlewared/middlewared/plugins/audit/audit.py +++ b/src/middlewared/middlewared/plugins/audit/audit.py @@ -31,7 +31,7 @@ accepts, Bool, Datetime, Dict, Int, List, Patch, Ref, returns, Str, UUID ) from middlewared.service import filterable, filterable_returns, job, private, ConfigService -from middlewared.service_exception import CallError, ValidationErrors +from middlewared.service_exception import CallError, ValidationErrors, ValidationError from middlewared.utils import filter_list from middlewared.utils.mount import getmntinfo from middlewared.utils.functools_ import cache @@ -135,6 +135,7 @@ async def compress(self, data): List('services', items=[Str('db_name', enum=ALL_AUDITED)], default=NON_BULK_AUDIT), Ref('query-filters'), Ref('query-options'), + Bool('remote_controller', default=False), register=True )) @filterable_returns(Dict( @@ -159,6 +160,9 @@ async def query(self, data): converted into a more efficient form for better performance. This will not be possible if filters use keys within `svc_data` and `event_data`. + HA systems may direct the query to the 'remote' controller by + including 'remote_controller=True'. The default is the 'current' controller. + Each audit entry contains the following keys: `audit_id` - GUID uniquely identifying this specific audit event. @@ -193,9 +197,37 @@ async def query(self, data): `success` - boolean value indicating whether the action generating the event message succeeded. 
""" - sql_filters = data['query-options']['force_sql_filters'] verrors = ValidationErrors() + + # If HA, handle the possibility of remote controller requests + if await self.middleware.call('failover.licensed') and data['remote_controller']: + data.pop('remote_controller') + try: + audit_query = await self.middleware.call( + 'failover.call_remote', + 'audit.query', + [data], + {'raise_connect_error': False, 'timeout': 2, 'connect_timeout': 2} + ) + return audit_query + except CallError as e: + if e.errno in [errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET, errno.EHOSTDOWN, + errno.ETIMEDOUT, CallError.EALERTCHECKERUNAVAILABLE]: + raise ValidationError( + 'audit.query.remote_controller', + 'Temporarily failed to communicate to remote controller' + ) + raise ValidationError( + 'audit.query.remote_controller', + 'Failed to query audit logs of remote controller' + ) + except Exception: + self.logger.exception('Unexpected failure querying remote node for audit entries') + raise + + sql_filters = data['query-options']['force_sql_filters'] + if (select := data['query-options'].get('select')): for idx, entry in enumerate(select): if isinstance(entry, list): diff --git a/src/middlewared/middlewared/plugins/audit/utils.py b/src/middlewared/middlewared/plugins/audit/utils.py index 230169b54289..f63d96e0757b 100644 --- a/src/middlewared/middlewared/plugins/audit/utils.py +++ b/src/middlewared/middlewared/plugins/audit/utils.py @@ -25,6 +25,7 @@ AuditEventParam.SUCCESS.value, ) + AuditBase = declarative_base() diff --git a/src/middlewared/middlewared/plugins/boot.py b/src/middlewared/middlewared/plugins/boot.py index 6b53f6ea11cd..30e1577c92e5 100644 --- a/src/middlewared/middlewared/plugins/boot.py +++ b/src/middlewared/middlewared/plugins/boot.py @@ -329,10 +329,6 @@ async def check_update_ashift_property(self): if properties: await self.middleware.call('zfs.pool.update', BOOT_POOL_NAME, {'properties': properties}) - @private - async def is_boot_pool_path(self, path): - return path.startswith(f'/dev/zvol/{await self.pool_name()}/') - async def on_config_upload(middleware, path): await middleware.call('boot.update_initramfs', {'database': path}) diff --git a/src/middlewared/middlewared/plugins/directoryservices.py b/src/middlewared/middlewared/plugins/directoryservices.py index 91472b515c8a..12576ca7e75c 100644 --- a/src/middlewared/middlewared/plugins/directoryservices.py +++ b/src/middlewared/middlewared/plugins/directoryservices.py @@ -35,7 +35,8 @@ class Config: @returns(Dict( 'directoryservices_status', Str('type', enum=[x.value for x in DSType], null=True), - Str('status', enum=[status.name for status in DSStatus], null=True) + Str('status', enum=[status.name for status in DSStatus], null=True), + Str('status_msg', null=True) )) def status(self): """ @@ -47,9 +48,7 @@ def status(self): except Exception: pass - status = DSHealthObj.dump() - status.pop('status_msg') - return status + return DSHealthObj.dump() @no_authz_required @accepts() @@ -218,12 +217,15 @@ def setup(self, job): job.set_progress(100, f'{failover_status}: skipping directory service setup due to failover status') return - self.middleware.call_sync('service.restart', 'idmap') - - self.middleware.call_sync('directoryservices.health.check') + # Recover is called here because it short-circuits if health check + # shows we're healthy. If we can't recover due to things being irreparably + # broken then this will raise an exception. 
+ self.middleware.call_sync('directoryservices.health.recover') if DSHealthObj.dstype is None: return + # nsswitch.conf needs to be updated + self.middleware.call_sync('etc.generate', 'nss') job.set_progress(10, 'Refreshing cache'), cache_refresh = self.middleware.call_sync('directoryservices.cache.refresh') cache_refresh.wait_sync() diff --git a/src/middlewared/middlewared/plugins/directoryservices_/ipa_health_mixin.py b/src/middlewared/middlewared/plugins/directoryservices_/ipa_health_mixin.py index 168b4ea1915d..a89c020d3b04 100644 --- a/src/middlewared/middlewared/plugins/directoryservices_/ipa_health_mixin.py +++ b/src/middlewared/middlewared/plugins/directoryservices_/ipa_health_mixin.py @@ -29,11 +29,8 @@ def _recover_ipa(self, error: IPAHealthError) -> None: self._recover_ipa_config() case IPAHealthCheckFailReason.IPA_NO_CACERT | IPAHealthCheckFailReason.IPA_CACERT_PERM: self._recover_ipa_config() - case IPAHealthCheckFailReason.LDAP_BIND_FAILED: + case IPAHealthCheckFailReason.LDAP_BIND_FAILED | IPAHealthCheckFailReason.SSSD_STOPPED: self._recover_ldap_config() - case IPAHealthCheckFailReason.SSSD_STOPPED: - # pick up with sssd restart below - pass case _: # not recoverable raise error from None diff --git a/src/middlewared/middlewared/plugins/directoryservices_/ldap_health_mixin.py b/src/middlewared/middlewared/plugins/directoryservices_/ldap_health_mixin.py index 39ccaccfb15b..dff17b9aa1de 100644 --- a/src/middlewared/middlewared/plugins/directoryservices_/ldap_health_mixin.py +++ b/src/middlewared/middlewared/plugins/directoryservices_/ldap_health_mixin.py @@ -15,11 +15,8 @@ def _recover_ldap(self, error: LDAPHealthError) -> None: our health check. """ match error.reason: - case LDAPHealthCheckFailReason.LDAP_BIND_FAILED: + case LDAPHealthCheckFailReason.LDAP_BIND_FAILED | LDAPHealthCheckFailReason.SSSD_STOPPED: self._recover_ldap_config() - case LDAPHealthCheckFailReason.SSSD_STOPPED: - # pick up with sssd restart below - pass case _: # not recoverable raise error from None diff --git a/src/middlewared/middlewared/plugins/enclosure_/enclosure_class.py b/src/middlewared/middlewared/plugins/enclosure_/enclosure_class.py index 795c07e06f60..d279f72be33b 100644 --- a/src/middlewared/middlewared/plugins/enclosure_/enclosure_class.py +++ b/src/middlewared/middlewared/plugins/enclosure_/enclosure_class.py @@ -188,6 +188,9 @@ def _get_model_and_controller(self): case 'HGST_H4060-J': self.model = JbodModels.ES60G2.value self.controller = False + case 'WDC_UData60': + self.model = JbodModels.ES60G3.value + self.controller = False case 'HGST_H4102-J': self.model = JbodModels.ES102.value self.controller = False @@ -609,6 +612,7 @@ def is_60_bay_jbod(self): self.model in ( JbodModels.ES60.value, JbodModels.ES60G2.value, + JbodModels.ES60G3.value, ) )) diff --git a/src/middlewared/middlewared/plugins/enclosure_/enums.py b/src/middlewared/middlewared/plugins/enclosure_/enums.py index 1f0aecb690b5..384ff2bb57a3 100644 --- a/src/middlewared/middlewared/plugins/enclosure_/enums.py +++ b/src/middlewared/middlewared/plugins/enclosure_/enums.py @@ -52,6 +52,7 @@ class JbodModels(Enum): ES24F = 'ES24F' ES60 = 'ES60' ES60G2 = 'ES60G2' + ES60G3 = 'ES60G3' ES102 = 'ES102' ES102G2 = 'ES102G2' diff --git a/src/middlewared/middlewared/plugins/iscsi_/extents.py b/src/middlewared/middlewared/plugins/iscsi_/extents.py index ebb317f75a2a..6087b0d51c39 100755 --- a/src/middlewared/middlewared/plugins/iscsi_/extents.py +++ b/src/middlewared/middlewared/plugins/iscsi_/extents.py @@ -335,8 +335,7 @@ def 
clean_type_and_path(self, data, schema_name, verrors): if not os.path.exists(device): verrors.add(f'{schema_name}.disk', f'Device {device!r} for volume {zvol_name!r} does not exist') - if self.middleware.call_sync('boot.is_boot_pool_path', device): - verrors.add(f'{schema_name}.disk', 'Disk residing in boot pool cannot be consumed and is not supported') + self.middleware.call_sync('iscsi.extent.validate_zvol_path', verrors, f'{schema_name}.disk', device) if '@' in zvol_name and not data['ro']: verrors.add(f'{schema_name}.ro', 'Must be set when disk is a ZFS Snapshot') diff --git a/src/middlewared/middlewared/plugins/keychain.py b/src/middlewared/middlewared/plugins/keychain.py index 2d62d0a547be..b47c41d97be0 100644 --- a/src/middlewared/middlewared/plugins/keychain.py +++ b/src/middlewared/middlewared/plugins/keychain.py @@ -11,12 +11,25 @@ from truenas_api_client import Client, ClientException +from middlewared.api import api_method +from middlewared.api.current import ( + KeychainCredentialEntry, + KeychainCredentialCreateArgs, KeychainCredentialCreateResult, + KeychainCredentialUpdateArgs, KeychainCredentialUpdateResult, + KeychainCredentialDeleteArgs, KeychainCredentialDeleteResult, + KeychainCredentialUsedByArgs, KeychainCredentialUsedByResult, + KeychainCredentialGetOfTypeArgs, KeychainCredentialGetOfTypeResult, + KeychainCredentialGenerateSSHKeyPairArgs, KeychainCredentialGenerateSSHKeyPairResult, + KeychainCredentialRemoteSSHHostKeyScanArgs, KeychainCredentialRemoteSSHHostKeyScanResult, + KeychainCredentialRemoteSSHSemiautomaticSetupArgs, KeychainCredentialRemoteSSHSemiautomaticSetupResult, + KeychainCredentialSSHPairArgs, KeychainCredentialSSHPairResult, +) from middlewared.service_exception import CallError, MatchNotFound -from middlewared.schema import accepts, Bool, Dict, Int, List, Patch, Password, Ref, returns, Str, ValidationErrors +from middlewared.schema import Int, Str, ValidationErrors from middlewared.service import CRUDService, private import middlewared.sqlalchemy as sa from middlewared.utils import run -from middlewared.validators import validate_schema, URL +from middlewared.validators import validate_schema class KeychainCredentialType: @@ -256,19 +269,9 @@ class Config: datastore = "system.keychaincredential" cli_namespace = "system.keychain_credential" role_prefix = "KEYCHAIN_CREDENTIAL" + entry = KeychainCredentialEntry - ENTRY = Patch( - "keychain_credential_create", "keychain_credential_entry", - ("add", Int("id")), - ) - - @accepts(Dict( - "keychain_credential_create", - Str("name", required=True, empty=False), - Str("type", required=True), - Dict("attributes", additional_attrs=True, required=True, private=True), - register=True, - )) + @api_method(KeychainCredentialCreateArgs, KeychainCredentialCreateResult) async def do_create(self, data): """ Create a Keychain Credential @@ -319,15 +322,7 @@ async def do_create(self, data): ) return data - @accepts( - Int("id"), - Patch( - "keychain_credential_create", - "keychain_credential_update", - ("attr", {"update": True}), - ("rm", {"name": "type"}), - ) - ) + @api_method(KeychainCredentialUpdateArgs, KeychainCredentialUpdateResult) async def do_update(self, id_, data): """ Update a Keychain Credential with specific `id` @@ -378,8 +373,7 @@ async def do_update(self, id_, data): return new - @accepts(Int("id"), Dict("options", Bool("cascade", default=False))) - @returns() + @api_method(KeychainCredentialDeleteArgs, KeychainCredentialDeleteResult) async def do_delete(self, id_, options): """ Delete Keychain 
Credential with specific `id` @@ -413,12 +407,7 @@ async def do_delete(self, id_, options): id_, ) - @accepts(Int("id")) - @returns(List("credential_results", items=[Dict( - "credential_result", - Str("title"), - Str("unbind_method"), - )])) + @api_method(KeychainCredentialUsedByArgs, KeychainCredentialUsedByResult) async def used_by(self, id_): """ Returns list of objects that use this credential. @@ -457,9 +446,7 @@ async def _validate(self, schema_name, data, id_=None): verrors.check() - @private - @accepts(Int("id"), Str("type")) - @returns(Ref("keychain_credential_entry")) + @api_method(KeychainCredentialGetOfTypeArgs, KeychainCredentialGetOfTypeResult, private=True) async def get_of_type(self, id_, type_): try: credential = await self.middleware.call("keychaincredential.query", [["id", "=", id_]], {"get": True}) @@ -474,12 +461,8 @@ async def get_of_type(self, id_, type_): return credential - @accepts(roles=["KEYCHAIN_CREDENTIAL_WRITE"]) - @returns(Dict( - "ssh_key_pair", - Str("private_key", max_length=None, required=True), - Str("public_key", max_length=None, required=True), - )) + @api_method(KeychainCredentialGenerateSSHKeyPairArgs, KeychainCredentialGenerateSSHKeyPairResult, + roles=["KEYCHAIN_CREDENTIAL_WRITE"]) def generate_ssh_key_pair(self): """ Generate a public/private key pair @@ -510,16 +493,8 @@ def generate_ssh_key_pair(self): "public_key": public_key, } - @accepts( - Dict( - "keychain_remote_ssh_host_key_scan", - Str("host", required=True, empty=False), - Str("port", default=22), - Int("connect_timeout", default=10), - ), - roles=["KEYCHAIN_CREDENTIAL_WRITE"], - ) - @returns(Str("remove_ssh_host_key", max_length=None)) + @api_method(KeychainCredentialRemoteSSHHostKeyScanArgs, KeychainCredentialRemoteSSHHostKeyScanResult, + roles=["KEYCHAIN_CREDENTIAL_WRITE"]) async def remote_ssh_host_key_scan(self, data): """ Discover a remote host key @@ -554,25 +529,8 @@ async def remote_ssh_host_key_scan(self, data): else: raise CallError(f"ssh-keyscan failed: {proc.stdout + proc.stderr}") - @accepts( - Dict( - "keychain_remote_ssh_semiautomatic_setup", - Str("name", required=True), - Str("url", required=True, validators=[URL()]), - Bool("verify_ssl", default=True), - Password("token"), - Str("admin_username", default="root"), - Password("password"), - Password("otp_token"), - Str("username", default="root"), - Int("private_key", required=True, private=True), - Int("connect_timeout", default=10), - Bool("sudo", default=False), - register=True, - ), - roles=["KEYCHAIN_CREDENTIAL_WRITE"], - ) - @returns(Ref("keychain_credential_entry")) + @api_method(KeychainCredentialRemoteSSHSemiautomaticSetupArgs, KeychainCredentialRemoteSSHSemiautomaticSetupResult, + roles=["KEYCHAIN_CREDENTIAL_WRITE"]) def remote_ssh_semiautomatic_setup(self, data): """ Perform semi-automatic SSH connection setup with other FreeNAS machine @@ -664,13 +622,7 @@ def remote_ssh_semiautomatic_setup(self, data): } }) - @private - @accepts(Dict( - "keychain_ssh_pair", - Str("remote_hostname", required=True), - Str("username", default="root"), - Str("public_key", required=True), - )) + @api_method(KeychainCredentialSSHPairArgs, KeychainCredentialSSHPairResult, private=True) def ssh_pair(self, data): """ Receives public key, storing it to accept SSH connection and return diff --git a/src/middlewared/middlewared/plugins/keychain_/ssh_connections.py b/src/middlewared/middlewared/plugins/keychain_/ssh_connections.py index 259432f10472..805d6b6743ce 100644 --- 
a/src/middlewared/middlewared/plugins/keychain_/ssh_connections.py +++ b/src/middlewared/middlewared/plugins/keychain_/ssh_connections.py @@ -1,37 +1,12 @@ -from middlewared.schema import accepts, Bool, Dict, Int, Patch, Ref, returns, Str +from middlewared.api import api_method +from middlewared.api.current import KeychainCredentialSetupSSHConnectionArgs, KeychainCredentialSetupSSHConnectionResult from middlewared.service import Service, ValidationErrors class KeychainCredentialService(Service): - @accepts( - Dict( - 'setup_ssh_connection', - Dict( - 'private_key', - Bool('generate_key', default=True), - Int('existing_key_id'), - Str('name', empty=False), - ), - Str('connection_name', required=True), - Str('setup_type', required=True, enum=['SEMI-AUTOMATIC', 'MANUAL'], default='MANUAL'), - Patch( - 'keychain_remote_ssh_semiautomatic_setup', 'semi_automatic_setup', - ('rm', {'name': 'name'}), - ('rm', {'name': 'private_key'}), - ('attr', {'null': True}), - ('attr', {'default': None}), - ), - Dict( - 'manual_setup', - additional_attrs=True, - null=True, - default=None, - ) - ), - roles=['KEYCHAIN_CREDENTIAL_WRITE'], - ) - @returns(Ref('keychain_credential_entry')) + @api_method(KeychainCredentialSetupSSHConnectionArgs, KeychainCredentialSetupSSHConnectionResult, + roles=['KEYCHAIN_CREDENTIAL_WRITE']) async def setup_ssh_connection(self, options): """ Creates a SSH Connection performing the following steps: diff --git a/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py b/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py index 64f5a8863417..918c4570436a 100644 --- a/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py +++ b/src/middlewared/middlewared/plugins/kubernetes_to_docker/list_utils.py @@ -74,6 +74,9 @@ def release_details( **secrets, }) + if config['app_name'] == 'ix-chart' and release_train == 'stable': + config['app_name'] = 'ix-app' + if config['app_name'] not in apps_mapping[release_train]: return config | {'error': 'Unable to locate release\'s app'} diff --git a/src/middlewared/middlewared/plugins/mail.py b/src/middlewared/middlewared/plugins/mail.py index bfd01bd134fe..0c7a01f60c50 100644 --- a/src/middlewared/middlewared/plugins/mail.py +++ b/src/middlewared/middlewared/plugins/mail.py @@ -492,11 +492,11 @@ def _from_addr(self, config): @private async def local_administrators_emails(self): - return list(set( - user["email"] - for user in await self.middleware.call("privilege.local_administrators") - if user["email"] - )) + return list(set(user["email"] for user in await self.middleware.call("user.query", [ + ["roles", "rin", "FULL_ADMIN"], + ["local", "=", True], + ["email", "!=", None] + ]))) @private async def local_administrator_email(self): diff --git a/src/middlewared/middlewared/plugins/nfs.py b/src/middlewared/middlewared/plugins/nfs.py index 20440b7b23e1..af4f9e344de9 100644 --- a/src/middlewared/middlewared/plugins/nfs.py +++ b/src/middlewared/middlewared/plugins/nfs.py @@ -56,7 +56,6 @@ class NFSModel(sa.Model): nfs_srv_servers = sa.Column(sa.Integer(), nullable=True) nfs_srv_allow_nonroot = sa.Column(sa.Boolean(), default=False) nfs_srv_protocols = sa.Column(sa.JSON(list), default=[NFSProtocol.NFSv3, NFSProtocol.NFSv4]) - nfs_srv_v4_v3owner = sa.Column(sa.Boolean(), default=False) nfs_srv_v4_krb = sa.Column(sa.Boolean(), default=False) nfs_srv_bindip = sa.Column(sa.MultiSelectField()) nfs_srv_mountd_port = sa.Column(sa.SmallInteger(), nullable=True) @@ -86,7 +85,6 @@ class Config: Int('servers', null=True, 
validators=[Range(min_=1, max_=256)], required=True), Bool('allow_nonroot', required=True), List('protocols', items=[Str('protocol', enum=NFSProtocol.choices())], required=True), - Bool('v4_v3owner', required=True), Bool('v4_krb', required=True), Str('v4_domain', required=True), List('bindip', items=[IPAddr('ip')], required=True), @@ -288,9 +286,6 @@ async def do_update(self, data): INPUT: Select NFSv3 or NFSv4 or NFSv3,NFSv4 Default: NFSv3,NFSv4 - `v4_v3owner` - when set means that system will use NFSv3 ownership model for NFSv4. - (Deprecated) - `v4_krb` - Force Kerberos authentication on NFS shares If enabled, NFS shares will fail if the Kerberos ticket is unavilable @@ -345,8 +340,6 @@ async def do_update(self, data): 'nfs_update.protocols', 'Must specify at least one value ("NFSV3", "NFSV4") in the "protocols" list.' ) - if NFSProtocol.NFSv4 not in data.get("protocols"): - data.setdefault("v4_v3owner", False) old = await self.config() @@ -395,13 +388,6 @@ async def do_update(self, data): await self.middleware.call('etc.generate', 'smb') await self.middleware.call('service.reload', 'idmap') - if NFSProtocol.NFSv4 not in new["protocols"] and new["v4_v3owner"]: - verrors.add("nfs_update.v4_v3owner", "This option requires enabling NFSv4") - - if new["v4_v3owner"] and new["userd_manage_gids"]: - verrors.add( - "nfs_update.userd_manage_gids", "This option is incompatible with NFSv3 ownership model for NFSv4") - if NFSProtocol.NFSv4 not in new["protocols"] and new["v4_domain"]: verrors.add("nfs_update.v4_domain", "This option does not apply to NFSv3") diff --git a/src/middlewared/middlewared/plugins/ntp.py b/src/middlewared/middlewared/plugins/ntp.py index a39c5a5fc438..a0fc2745f4db 100644 --- a/src/middlewared/middlewared/plugins/ntp.py +++ b/src/middlewared/middlewared/plugins/ntp.py @@ -230,15 +230,12 @@ async def clean(self, data, schema_name): verrors = ValidationErrors() maxpoll = data['maxpoll'] minpoll = data['minpoll'] - force = data.pop('force', False) - usable = True if await self.middleware.run_in_thread( - self.test_ntp_server, data['address']) else False - - if not force and not usable: - verrors.add(f'{schema_name}.address', - 'Server could not be reached. Check "Force" to ' - 'continue regardless.' - ) + if not data.pop('force', False): + if not await self.middleware.run_in_thread(self.test_ntp_server, data['address']): + verrors.add( + f'{schema_name}.address', + 'Server could not be reached. Check "Force" to continue regardless.' 
+ ) if not maxpoll > minpoll: verrors.add(f'{schema_name}.maxpoll', diff --git a/src/middlewared/middlewared/plugins/service.py b/src/middlewared/middlewared/plugins/service.py index efef7079299a..a4438f92c69d 100644 --- a/src/middlewared/middlewared/plugins/service.py +++ b/src/middlewared/middlewared/plugins/service.py @@ -158,7 +158,7 @@ async def do_update(self, app, audit_callback, id_or_name, data): ), roles=['SERVICE_WRITE', 'SHARING_NFS_WRITE', 'SHARING_SMB_WRITE', 'SHARING_ISCSI_WRITE', 'SHARING_FTP_WRITE'] ) - @returns(Bool('started_service')) + @returns(Bool('started_service', description='Will return `true` if service successfully started')) @pass_app(rest=True) async def start(self, app, service, options): """ @@ -269,11 +269,13 @@ async def stop(self, app, service, options): if service_object.deprecated: await self.middleware.call('alert.oneshot_delete', 'DeprecatedService', service_object.name) - return False + return True else: self.logger.error("Service %r running after stop", service) await self.middleware.call('service.notify_running', service) - return True + if options['silent']: + return False + raise CallError(await service_object.failure_logs() or 'Service still running after stop') @accepts( Str('service'), diff --git a/src/middlewared/middlewared/plugins/smb.py b/src/middlewared/middlewared/plugins/smb.py index 18cb8cbf8701..8bee48dbc03f 100644 --- a/src/middlewared/middlewared/plugins/smb.py +++ b/src/middlewared/middlewared/plugins/smb.py @@ -382,21 +382,12 @@ async def configure(self, job, create_paths=True): job.set_progress(30, 'Setting up server SID.') await self.middleware.call('smb.set_system_sid') - """ - If the ldap passdb backend is being used, then the remote LDAP server - will provide the SMB users and groups. We skip these steps to avoid having - samba potentially try to write our local users and groups to the remote - LDAP server. 
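The `service.stop` hunk above inverts the old return convention: a stop that actually brings the service down now returns `true`, while a service that is still running afterwards raises a `CallError` carrying its failure logs unless the `silent` option is set. A minimal caller-side sketch of that contract, assuming a plugin-style `middleware` handle; the helper name and log message are illustrative only and not part of this change:

from middlewared.service_exception import CallError


async def stop_cifs(middleware) -> bool:
    # Illustrative only: stop the SMB service and report whether it really stopped.
    try:
        # Under the new contract, True means the unit is no longer running.
        return await middleware.call('service.stop', 'cifs', {'silent': False})
    except CallError as e:
        # Raised when the service is still running after the stop attempt;
        # the message carries the service's failure logs when available.
        middleware.logger.warning('cifs did not stop cleanly: %s', e)
        return False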
- - """ - passdb_backend = await self.middleware.call('smb.getparm', 'passdb backend', 'global') - if passdb_backend.startswith("tdbsam"): - job.set_progress(40, 'Synchronizing passdb and groupmap.') - await self.middleware.call('etc.generate', 'user') - pdb_job = await self.middleware.call("smb.synchronize_passdb", True) - grp_job = await self.middleware.call("smb.synchronize_group_mappings", True) - await pdb_job.wait() - await grp_job.wait() + job.set_progress(40, 'Synchronizing passdb and groupmap.') + await self.middleware.call('etc.generate', 'user') + pdb_job = await self.middleware.call("smb.synchronize_passdb", True) + grp_job = await self.middleware.call("smb.synchronize_group_mappings", True) + await pdb_job.wait() + await grp_job.wait() """ The following steps ensure that we cleanly import our SMB shares @@ -427,7 +418,11 @@ async def configure(self, job, create_paths=True): job.set_progress(70, 'Checking SMB server status.') if await self.middleware.call("service.started_or_enabled", "cifs"): job.set_progress(80, 'Restarting SMB service.') - await self.middleware.call("service.restart", "cifs") + await self.middleware.call("service.restart", "cifs", {"ha_propagate": False}) + + # Ensure that winbind is running once we configure SMB service + await self.middleware.call('service.restart', 'idmap', {'ha_propagate': False}) + job.set_progress(100, 'Finished configuring SMB.') @private diff --git a/src/middlewared/middlewared/plugins/smb_/sharesec.py b/src/middlewared/middlewared/plugins/smb_/sharesec.py index a901156b0416..6fed0591c809 100644 --- a/src/middlewared/middlewared/plugins/smb_/sharesec.py +++ b/src/middlewared/middlewared/plugins/smb_/sharesec.py @@ -185,7 +185,7 @@ async def synchronize_acls(self): if not (share_acl := filter_list(entries, [['key', '=', f'SECDESC/{share_name.lower()}']])): continue - if share_acl[0] != s['share_acl']: + if share_acl[0]['value'] != s['share_acl']: self.logger.debug('Updating stored copy of SMB share ACL on %s', share_name) await self.middleware.call( 'datastore.update', diff --git a/src/middlewared/middlewared/plugins/vm/devices/storage_devices.py b/src/middlewared/middlewared/plugins/vm/devices/storage_devices.py index 703bdaf8cd1b..8b3a938fe501 100644 --- a/src/middlewared/middlewared/plugins/vm/devices/storage_devices.py +++ b/src/middlewared/middlewared/plugins/vm/devices/storage_devices.py @@ -2,6 +2,7 @@ import os from middlewared.plugins.zfs_.utils import zvol_name_to_path, zvol_path_to_name +from middlewared.plugins.zfs_.validation_utils import check_zvol_in_boot_pool_using_path from middlewared.schema import Bool, Dict, Int, Str from middlewared.validators import Match @@ -162,6 +163,8 @@ def _validate(self, device, verrors, old=None, vm_instance=None, update=True): verrors.add('attributes.path', 'Disk path is required.') elif not path.startswith('/dev/zvol/'): verrors.add('attributes.path', 'Disk path must start with "/dev/zvol/"') + elif check_zvol_in_boot_pool_using_path(path): + verrors.add('attributes.path', 'Disk residing in boot pool cannot be consumed and is not supported') else: zvol = self.middleware.call_sync( 'zfs.dataset.query', [['id', '=', zvol_path_to_name(path)]], {'extra': {'properties': []}} diff --git a/src/middlewared/middlewared/plugins/zfs_/validation_utils.py b/src/middlewared/middlewared/plugins/zfs_/validation_utils.py index bbafaeaf81ab..32de8429b856 100644 --- a/src/middlewared/middlewared/plugins/zfs_/validation_utils.py +++ b/src/middlewared/middlewared/plugins/zfs_/validation_utils.py @@ -1,5 
+1,16 @@ import libzfs +from .utils import zvol_name_to_path + + +def check_zvol_in_boot_pool_using_name(zvol_name: str) -> bool: + return check_zvol_in_boot_pool_using_path(zvol_name_to_path(zvol_name)) + + +def check_zvol_in_boot_pool_using_path(zvol_path: str) -> bool: + from middlewared.plugins.boot import BOOT_POOL_NAME + return zvol_path.startswith(f'/dev/zvol/{BOOT_POOL_NAME}/') + def validate_pool_name(name: str) -> bool: return libzfs.validate_pool_name(name) diff --git a/src/middlewared/middlewared/pytest/unit/api/handler/accept/test_default.py b/src/middlewared/middlewared/pytest/unit/api/handler/accept/test_default.py new file mode 100644 index 000000000000..38aa3b451dd4 --- /dev/null +++ b/src/middlewared/middlewared/pytest/unit/api/handler/accept/test_default.py @@ -0,0 +1,16 @@ +from pydantic import Field + +from middlewared.api.base import BaseModel +from middlewared.api.base.handler.accept import accept_params + + +def test_default_dict(): + class Options(BaseModel): + force: bool = False + + class MethodArgs(BaseModel): + id: int + options: Options = Field(default=Options()) + + assert accept_params(MethodArgs, [1, {"force": True}]) == [1, {"force": True}] + assert accept_params(MethodArgs, [1]) == [1, {"force": False}] diff --git a/src/middlewared/middlewared/pytest/unit/utils/test_groupmap.py b/src/middlewared/middlewared/pytest/unit/utils/test_groupmap.py index 9bc6ea1c94b4..0d8a144ecda5 100644 --- a/src/middlewared/middlewared/pytest/unit/utils/test_groupmap.py +++ b/src/middlewared/middlewared/pytest/unit/utils/test_groupmap.py @@ -22,6 +22,11 @@ @pytest.fixture(scope='module') def groupmap_dir(): os.makedirs('/var/db/system/samba4', exist_ok=True) + try: + # pre-emptively delete in case we're running on a TrueNAS VM + os.unlink('/var/db/system/samba4/group_mapping.tdb') + except FileNotFoundError: + pass @pytest.fixture(scope='module') diff --git a/src/middlewared/middlewared/service/sharing_service.py b/src/middlewared/middlewared/service/sharing_service.py index bd64ed89a8a4..73d96d8c823d 100644 --- a/src/middlewared/middlewared/service/sharing_service.py +++ b/src/middlewared/middlewared/service/sharing_service.py @@ -1,9 +1,6 @@ -import errno -import os - -from middlewared.service_exception import CallError -from middlewared.utils.path import FSLocation, path_location, strip_location_prefix from middlewared.async_validators import check_path_resides_within_volume +from middlewared.plugins.zfs_.validation_utils import check_zvol_in_boot_pool_using_path +from middlewared.utils.path import FSLocation, path_location, strip_location_prefix from .crud_service import CRUDService from .decorators import pass_app, private @@ -62,6 +59,11 @@ async def validate_external_path(self, verrors, name, path): # validation here because we can't predict what is required. 
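The `check_zvol_in_boot_pool_using_path` helper introduced above is used by the VM disk validation earlier in this diff and, in the next hunk, by the generic sharing-path validation, so any share or VM disk whose path resolves into the boot pool is rejected up front. A rough sketch of the resulting guard from a caller's point of view, assuming the usual `ValidationErrors` pattern; the schema field name `sharingnfs_create.path` is only an example:

from middlewared.plugins.zfs_.validation_utils import check_zvol_in_boot_pool_using_path
from middlewared.service import ValidationErrors


def reject_boot_pool_zvol(path: str) -> None:
    # Illustrative only: fail validation when a path points at a boot-pool zvol.
    verrors = ValidationErrors()
    if check_zvol_in_boot_pool_using_path(path):
        verrors.add(
            'sharingnfs_create.path',
            'Disk residing in boot pool cannot be consumed and is not supported'
        )
    verrors.check()  # raises ValidationErrors if anything was added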
raise NotImplementedError + @private + async def validate_zvol_path(self, verrors, name, path): + if check_zvol_in_boot_pool_using_path(path): + verrors.add(name, 'Disk residing in boot pool cannot be consumed and is not supported') + @private async def validate_local_path(self, verrors, name, path): await check_path_resides_within_volume(verrors, self.middleware, name, path) @@ -70,6 +72,7 @@ async def validate_local_path(self, verrors, name, path): async def validate_path_field(self, data, schema, verrors): name = f'{schema}.{self.path_field}' path = await self.get_path_field(data) + await self.validate_zvol_path(verrors, name, path) loc = path_location(path) if loc not in self.allowed_path_types: diff --git a/src/middlewared/middlewared/test/integration/assets/pool.py b/src/middlewared/middlewared/test/integration/assets/pool.py index e07bc96e2978..1bfa81194611 100644 --- a/src/middlewared/middlewared/test/integration/assets/pool.py +++ b/src/middlewared/middlewared/test/integration/assets/pool.py @@ -67,11 +67,10 @@ def dataset(name, data=None, pool=pool, **kwargs): call("pool.dataset.create", {"name": dataset, **data}) try: - if "acl" in kwargs or "mode" in kwargs: - if "acl" in kwargs: - call("filesystem.setacl", {'path': f"/mnt/{dataset}", "dacl": kwargs['acl']}) - else: - call("filesystem.setperm", {'path': f"/mnt/{dataset}", "mode": kwargs['mode'] or "777"}) + if "acl" in kwargs: + call("filesystem.setacl", {'path': f"/mnt/{dataset}", "dacl": kwargs['acl']}) + elif "mode" in kwargs: + call("filesystem.setperm", {'path': f"/mnt/{dataset}", "mode": kwargs['mode'] or "777"}) yield dataset finally: diff --git a/src/middlewared/middlewared/utils/crypto.py b/src/middlewared/middlewared/utils/crypto.py index 25880fad0ce3..5f5d826f2999 100644 --- a/src/middlewared/middlewared/utils/crypto.py +++ b/src/middlewared/middlewared/utils/crypto.py @@ -1,3 +1,5 @@ +from base64 import b64encode +from hashlib import pbkdf2_hmac from secrets import choice, compare_digest, token_urlsafe, token_hex from string import ascii_letters, digits, punctuation @@ -63,3 +65,16 @@ def generate_nt_hash(passwd): """ md4_hash_bytes = md4_hash_blob(passwd.encode('utf-16le')) return md4_hash_bytes.hex().upper() + + +def generate_pbkdf2_512(passwd): + """ + Generate a pbkdf2_sha512 hash for password. This is used for + verification of API keys. 
+    """ +    prefix = 'pbkdf2-sha512' +    rounds = 500000 +    salt_length = 16 +    salt = generate_string(string_size=salt_length, extra_chars='./').encode() +    hash = pbkdf2_hmac('sha512', passwd.encode(), salt, rounds) +    return f'${prefix}${rounds}${b64encode(salt).decode()}${b64encode(hash).decode()}' diff --git a/src/middlewared/middlewared/utils/license.py b/src/middlewared/middlewared/utils/license.py index d6c216f733e6..3fa7ce4b4f79 100644 --- a/src/middlewared/middlewared/utils/license.py +++ b/src/middlewared/middlewared/utils/license.py @@ -11,4 +11,5 @@     10: "ES102G2",     11: "ES60G2",     12: "ES24N", +    13: "ES60G3", } diff --git a/src/middlewared/middlewared/utils/tdb.py b/src/middlewared/middlewared/utils/tdb.py index d67ef9931700..974990d8b616 100644 --- a/src/middlewared/middlewared/utils/tdb.py +++ b/src/middlewared/middlewared/utils/tdb.py @@ -100,6 +100,12 @@ class TDBHandle:     opath_fd = FD_CLOSED     keys_null_terminated = False +    def __enter__(self): +        return self + +    def __exit__(self, tp, val, traceback): +        self.close() +     def close(self):         """ Close the TDB handle and O_PATH open for the file """         if self.opath_fd == FD_CLOSED and self.hdl is None: diff --git a/src/middlewared/middlewared/utils/user_api_key.py b/src/middlewared/middlewared/utils/user_api_key.py new file mode 100644 index 000000000000..40eb94207f2d --- /dev/null +++ b/src/middlewared/middlewared/utils/user_api_key.py @@ -0,0 +1,113 @@ +import os + +from base64 import b64encode +from dataclasses import dataclass +from struct import pack +from uuid import uuid4 +from .tdb import ( +    TDBDataType, +    TDBHandle, +    TDBOptions, +    TDBPathType, +) + + +PAM_TDB_DIR = '/var/run/pam_tdb' +PAM_TDB_FILE = os.path.join(PAM_TDB_DIR, 'pam_tdb.tdb') +PAM_TDB_DIR_MODE = 0o700 +PAM_TDB_VERSION = 1 +PAM_TDB_MAX_KEYS = 10  # Max number of keys per user. Also defined in pam_tdb.c + +PAM_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES) + + +@dataclass(frozen=True) +class UserApiKey: +    expiry: int +    dbid: int +    userhash: str + + +@dataclass(frozen=True) +class PamTdbEntry: +    keys: list[UserApiKey] +    username: str + + +def _setup_pam_tdb_dir() -> None: +    os.makedirs(PAM_TDB_DIR, mode=PAM_TDB_DIR_MODE, exist_ok=True) +    os.chmod(PAM_TDB_DIR, PAM_TDB_DIR_MODE) + + +def _pack_user_api_key(api_key: UserApiKey) -> bytes: +    """ +    Convert UserApiKey object to bytes for TDB insertion. +    This is packed struct with expiry converted into signed 64 bit +    integer, the database id (32-bit unsigned), and the userhash (pascal string) +    """ +    if not isinstance(api_key, UserApiKey): +        raise TypeError(f'{type(api_key)}: not a UserApiKey') + +    userhash = api_key.userhash.encode() + b'\x00' +    return pack(f'<qI{len(userhash)}p', api_key.expiry, api_key.dbid, userhash) + + +def write_entry(hdl: TDBHandle, entry: PamTdbEntry) -> None: +    """ +    Convert PamTdbEntry object into a packed struct and insert +    into tdb file. + +    key: username +    value: uint32_t (version) + uint32_t (cnt of keys) +    """ +    if not isinstance(entry, PamTdbEntry): +        raise TypeError(f'{type(entry)}: expected PamTdbEntry') + +    key_cnt = len(entry.keys) +    if key_cnt > PAM_TDB_MAX_KEYS: +        raise ValueError(f'{key_cnt}: count of entries exceeds maximum') + +    entry_bytes = pack('<II', PAM_TDB_VERSION, key_cnt) +    parsed = 0 +    for api_key in entry.keys: +        entry_bytes += _pack_user_api_key(api_key) +        parsed += 1 + +    assert parsed == key_cnt +    hdl.store(entry.username, b64encode(entry_bytes)) + + +def flush_user_api_keys(pam_entries: list[PamTdbEntry]) -> None: +    """ +    Write a PamTdbEntry object to the pam_tdb file for user +    authentication. This method first writes to temporary file +    and then renames over pam_tdb file to ensure flush is atomic +    and reduce risk of lock contention while under a transaction +    lock.
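`generate_pbkdf2_512` earlier in this diff emits a modular-crypt style string of the form `$pbkdf2-sha512$<rounds>$<b64 salt>$<b64 digest>`, presumably the kind of hash carried in `UserApiKey.userhash` above. A minimal sketch of the matching verification step, assuming only that string layout; `verify_pbkdf2_512` is a hypothetical helper and not part of this change:

from base64 import b64decode
from hashlib import pbkdf2_hmac
from hmac import compare_digest


def verify_pbkdf2_512(passwd: str, stored: str) -> bool:
    # Illustrative only: check a password against a '$pbkdf2-sha512$...' string.
    # stored looks like: $pbkdf2-sha512$500000$<base64 salt>$<base64 digest>
    _, prefix, rounds, salt_b64, digest_b64 = stored.split('$')
    if prefix != 'pbkdf2-sha512':
        return False
    digest = pbkdf2_hmac('sha512', passwd.encode(), b64decode(salt_b64), int(rounds))
    return compare_digest(digest, b64decode(digest_b64))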
+ + raises: + TypeError - not PamTdbEntry + AssertionError - count of entries changed while generating + tdb payload + RuntimeError - TDB library error + """ + _setup_pam_tdb_dir() + + if not isinstance(pam_entries, list): + raise TypeError('Expected list of PamTdbEntry objects') + + tmp_path = os.path.join(PAM_TDB_DIR, f'tmp_{uuid4()}.tdb') + + with TDBHandle(tmp_path, PAM_TDB_OPTIONS) as hdl: + hdl.keys_null_terminated = False + + try: + for entry in pam_entries: + write_entry(hdl, entry) + except Exception: + os.remove(tmp_path) + raise + + os.rename(tmp_path, PAM_TDB_FILE) diff --git a/tests/api2/test_005_interface.py b/tests/api2/test_005_interface.py index 2f2fb761c329..c376fbc0933f 100644 --- a/tests/api2/test_005_interface.py +++ b/tests/api2/test_005_interface.py @@ -127,7 +127,7 @@ def test_003_recheck_ipvx(request): assert int(call("tunable.get_sysctl", f"net.ipv6.conf.{interface}.autoconf")) == 0 -@pytest.mark.skipif(ha, reason="Test valid on HA systems only") +@pytest.mark.skipif(not ha, reason="Test valid on HA systems only") def test_004_remove_critical_failover_group(request): with pytest.raises(ValidationErrors) as ve: call( diff --git a/tests/api2/test_011_user.py b/tests/api2/test_011_user.py index c92791d0f279..9ffd98b360a0 100644 --- a/tests/api2/test_011_user.py +++ b/tests/api2/test_011_user.py @@ -212,7 +212,7 @@ def test_002_verify_user_exists_in_pwd(request): assert pw['pw_dir'] == VAR_EMPTY # At this point, we're not an SMB user - assert pw['sid'] is not None + assert pw['sid'] is None assert pw['source'] == 'LOCAL' assert pw['local'] is True diff --git a/tests/api2/test_050_alert.py b/tests/api2/test_050_alert.py deleted file mode 100644 index 0ac59191c27e..000000000000 --- a/tests/api2/test_050_alert.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python3 - -import pytest -import os -import sys -from pytest_dependency import depends -from time import sleep -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST, SSH_TEST -from auto_config import password, user, pool_name -from middlewared.test.integration.utils import call - - - -def test_01_get_alert_list(): - results = GET("/alert/list/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - - -def test_02_get_alert_list_categories(): - results = GET("/alert/list_categories/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - assert results.json(), results.json() - - -def test_03_get_alert_list_policies(): - results = GET("/alert/list_policies/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - assert results.json(), results.json() - - -@pytest.mark.dependency(name='degrade_pool') -def test_04_degrading_a_pool_to_create_an_alert(request): - global gptid - get_pool = GET(f"/pool/?name={pool_name}").json()[0] - id_path = '/dev/disk/by-partuuid/' - gptid = get_pool['topology']['data'][0]['path'].replace(id_path, '') - cmd = f'zinject -d {gptid} -A fault {pool_name}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -def test_05_verify_the_pool_is_degraded(request): - depends(request, ['degrade_pool'], scope="session") - cmd = f'zpool status {pool_name} | grep {gptid}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - assert 'DEGRADED' in results['output'], results['output'] - - -@pytest.mark.timeout(120) -def 
test_06_wait_for_the_alert_and_get_the_id(request): - depends(request, ["degrade_pool"], scope="session") - global alert_id - call("alert.process_alerts") - while True: - for line in GET("/alert/list/").json(): - if ( - line['source'] == 'VolumeStatus' and - line['args']['volume'] == pool_name and - line['args']['state'] == 'DEGRADED' - ): - alert_id = line['id'] - return - - sleep(1) - - -def test_08_dimiss_the_alert(request): - depends(request, ["degrade_pool"], scope="session") - results = POST("/alert/dismiss/", alert_id) - assert results.status_code == 200, results.text - assert isinstance(results.json(), type(None)), results.text - - -def test_09_verify_the_alert_is_dismissed(request): - depends(request, ["degrade_pool"], scope="session") - results = GET("/alert/list/") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - for line in results.json(): - if line['id'] == alert_id: - assert line['dismissed'] is True, results.text - break - - -def test_10_restore_the_alert(request): - depends(request, ["degrade_pool"], scope="session") - results = POST("/alert/restore/", alert_id) - assert results.status_code == 200, results.text - assert isinstance(results.json(), type(None)), results.text - - -def test_11_verify_the_alert_is_restored(request): - depends(request, ["degrade_pool"], scope="session") - results = GET(f"/alert/list/?id={alert_id}") - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - for line in results.json(): - if line['id'] == alert_id: - assert line['dismissed'] is False, results.text - break - - -def test_12_clear_the_pool_degradation(request): - depends(request, ["degrade_pool"], scope="session") - cmd = f'zpool clear {pool_name}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - - -def test_13_verify_the_pool_is_not_degraded(request): - depends(request, ["degrade_pool"], scope="session") - cmd = f'zpool status {pool_name} | grep {gptid}' - results = SSH_TEST(cmd, user, password) - assert results['result'] is True, results['output'] - assert 'DEGRADED' not in results['output'], results['output'] - - -@pytest.mark.timeout(120) -def test_14_wait_for_the_alert_to_disappear(request): - depends(request, ["degrade_pool"], scope="session") - while True: - if alert_id not in GET("/alert/list/").text: - assert True - break - sleep(1) diff --git a/tests/api2/test_140_core.py b/tests/api2/test_140_core.py deleted file mode 100644 index c32d3a4b49f3..000000000000 --- a/tests/api2/test_140_core.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD - -import pytest -import sys -import os -from urllib.request import urlretrieve -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET, POST -from middlewared.test.integration.utils.client import truenas_server - - -def test_01_get_core_jobs(): - results = GET('/core/get_jobs/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), list) is True - - -def test_02_get_core_ping(): - results = GET('/core/ping/') - assert results.status_code == 200, results.text - assert isinstance(results.json(), str) is True - assert results.json() == 'pong' - - -def test_03_get_download_info_for_config_dot_save(): - payload = { - 'method': 'config.save', - 'args': [], - 'filename': 'freenas.db' - } - results = POST('/core/download/', payload) - - assert results.status_code == 200, results.text - 
assert isinstance(results.json(), list) is True, results.text - global url - url = results.json()[1] - global job_id - job_id = results.json()[0] - - -def test_04_verify_job_id_state_is_running(): - results = GET(f'/core/get_jobs/?id={job_id}') - assert results.json()[0]['state'] == 'RUNNING', results.text - - -def test_05_download_from_url(): - rv = urlretrieve(f'http://{truenas_server.ip}{url}') - stat = os.stat(rv[0]) - assert stat.st_size > 0 - - -def test_06_verify_job_id_state_is_success(): - results = GET(f'/core/get_jobs/?id={job_id}') - assert results.json()[0]['state'] == 'SUCCESS', results.text diff --git a/tests/api2/test_300_nfs.py b/tests/api2/test_300_nfs.py index b360ddaa2a41..2cbc9a8b446e 100644 --- a/tests/api2/test_300_nfs.py +++ b/tests/api2/test_300_nfs.py @@ -61,7 +61,6 @@ class NFS_CONFIG: default_config = { "allow_nonroot": False, "protocols": ["NFSV3", "NFSV4"], - "v4_v3owner": False, "v4_krb": False, "v4_domain": "", "bindip": [], diff --git a/tests/api2/test_341_pool_dataset_encryption.py b/tests/api2/test_341_pool_dataset_encryption.py deleted file mode 100644 index 78e231ef3d66..000000000000 --- a/tests/api2/test_341_pool_dataset_encryption.py +++ /dev/null @@ -1,1130 +0,0 @@ -#!/usr/bin/env python3 - -# License: BSD - -import secrets - -import pytest -from middlewared.test.integration.utils import call -from pytest_dependency import depends - -from auto_config import password, user -from functions import DELETE, GET, POST, PUT, SSH_TEST, wait_on_job - -# genrated token_hex 32bit for -pool_token_hex = secrets.token_hex(32) -pool_token_hex2 = secrets.token_hex(32) -dataset_token_hex = secrets.token_hex(32) -dataset_token_hex2 = secrets.token_hex(32) -encrypted_pool_name = 'test_encrypted' -dataset = f'{encrypted_pool_name}/encrypted' -dataset_url = dataset.replace('/', '%2F') -child_dataset = f'{dataset}/child' -child_dataset_url = child_dataset.replace('/', '%2F') - - -@pytest.mark.dependency(name="CREATED_POOL") -def test_create_a_normal_pool(request): - global pool_id, pool_disks - # Get one disk for encryption testing - pool_disks = [call("disk.get_unused")[0]["name"]] - payload = { - 'name': encrypted_pool_name, - 'encryption': False, - 'topology': { - 'data': [ - {'type': 'STRIPE', 'disks': pool_disks} - ], - }, - "allow_duplicate_serials": True, - } - results = POST('/pool/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 240) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - pool_id = job_status['results']['result']['id'] - - -def test_create_a_passphrase_encrypted_root_on_normal_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': False, - 'pbkdf2iters': 100000, - 'algorithm': 'AES-128-CCM', - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_verify_pool_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_add_the_comment_on_the_passphrase_encrypted_root(request): - depends(request, ['CREATED_POOL']) - payload = { - 'comments': 'testing encrypted dataset' - } - results = 
PUT(f'/pool/dataset/id/{dataset_url}/', payload) - assert results.status_code == 200, results.text - - -def test_change_a_passphrase_encrypted_root_key_encryption(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'key': dataset_token_hex, - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_that_the_dataset_encrypted_root_changed_to_key_encryption(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_passphrase_encrypted_root(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_not_encrypted_dataset_on_a_normal_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption': False, - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] is None, results.text - - -def test_delete_not_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_dataset_with_inherit_encryption_true_on_a_normal_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_that_the_dataset_created_is_not_encrypted_like_the_parrent(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] is None, results.text - - -def test_delete_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_try_to_create_an_encrypted_dataset_with_pbkdf2itersl_zero(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'pbkdf2iters': 0, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - assert 'Should be greater or equal than 100000' in results.text, results.text - - -def test_try_to_create_an_encrypted_dataset_with_inherit_encryption_true(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - assert 'Must be disabled when encryption is enabled' in results.text, results.text - - -def test_verify_pool_encrypted_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def 
test_try_to_create_an_encrypted_dataset_with_passphrase_and_generate_key(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - assert 'Must be disabled when dataset is to be encrypted with passphrase' in results.text, results.text - - -def test_create_an_encrypted_root_with_generate_key(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_generate_key_encrypted_root(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_an_encrypted_root_with_a_key(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_verify_pool_encrypted_root_dataset_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_make_sure_we_are_not_able_to_lock_key_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'lock_options': { - 'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'FAILED', str(job_status['results']) - assert 'Only datasets which are encrypted with passphrase can be locked' in job_status['results']['error'], \ - job_status['results']['error'] - - -def test_change_a_key_encrypted_dataset_to_passphrase(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'passphrase': 'my_passphrase' - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_that_the_dataset_changed_to_passphrase(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_verify_pool_dataset_change_key_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_lock_passphrase_encrypted_datasets_and_ensure_they_get_locked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'lock_options': { - 
'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_passphrase_encrypted_root_is_locked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['key_format'] == 'PASSPHRASE', str(job_status_result) - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_passphrase_encrypted_datasets_with_wrong_passphrase(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'bad_passphrase' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['failed'][dataset]['error'] == 'Invalid Key', str(job_status['results']) - - -def test_verify_passphrase_encrypted_root_still_locked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['key_format'] == 'PASSPHRASE', str(job_status_result) - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_passphrase_encrypted_datasets(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'my_passphrase' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [dataset], str(job_status['results']) - - -def test_verify_passphrase_encrypted_root_is_unlocked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if 
dictionary['name'] == dataset: - assert dictionary['key_format'] == 'PASSPHRASE', str(job_status_result) - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_delete_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_delete_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'cascade': True, - 'restart_services': True, - 'destroy': True - } - results = POST(f'/pool/id/{pool_id}/export/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_create_a_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - global pool_id - payload = { - 'name': encrypted_pool_name, - 'encryption': True, - 'encryption_options': { - 'algorithm': 'AES-128-CCM', - 'passphrase': 'my_pool_passphrase', - }, - 'topology': { - 'data': [ - {'type': 'STRIPE', 'disks': pool_disks} - ], - }, - "allow_duplicate_serials": True, - } - results = POST('/pool/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 240) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - pool_id = job_status['results']['result']['id'] - - -def test_verify_pool_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_pool_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_pool_dataset_is_passphrase_encrypted_and_algorithm_encryption(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{encrypted_pool_name}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - assert results.json()['encryption_algorithm']['value'] == 'AES-128-CCM', results.text - - -def test_create_a_passphrase_encrypted_root_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': False, - 'pbkdf2iters': 100000, - 'algorithm': 'AES-128-CCM', - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_pool_encrypted_root_dataset_change_key_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_try_to_change_a_passphrase_encrypted_root_to_key_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'key': dataset_token_hex, - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'FAILED', str(job_status['results']) - - -def test_verify_pool_dataset_change_key_does_not_leak_passphrase_into_middleware_log_after_key_change(request): - cmd = """grep 
-R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_delete_encrypted_dataset_from_encrypted_root_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_dataset_to_inherit_encryption_from_the_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_delete_encrypted_dataset_from_the_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_try_to_create_an_encrypted_root_with_generate_key_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - - -def test_try_to_create_an_encrypted_root_with_key_on_passphrase_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 422, results.text - - -def test_verify_pool_key_encrypted_dataset_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_delete_the_passphrase_encrypted_pool_with_is_datasets(request): - depends(request, ['CREATED_POOL']) - payload = { - 'cascade': True, - 'restart_services': True, - 'destroy': True - } - results = POST(f'/pool/id/{pool_id}/export/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_creating_a_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - global pool_id - payload = { - 'name': encrypted_pool_name, - 'encryption': True, - 'encryption_options': { - 'algorithm': 'AES-128-CCM', - 'key': pool_token_hex, - }, - 'topology': { - 'data': [ - {'type': 'STRIPE', 'disks': pool_disks} - ], - }, - "allow_duplicate_serials": True, - } - results = POST('/pool/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 240) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - pool_id = job_status['results']['result']['id'] - - -def test_verify_pool_does_not_leak_encryption_key_into_middleware_log(request): - cmd = f"""grep -R "{pool_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_pool_dataset_is_hex_key_encrypted_and_algorithm_encryption(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{encrypted_pool_name}/') 
- assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - assert results.json()['encryption_algorithm']['value'] == 'AES-128-CCM', results.text - - -def test_creating_a_key_encrypted_root_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_verify_pool_dataset_does_not_leak_encryption_hex_key_into_middleware_log(request): - cmd = f"""grep -R "{dataset_token_hex}" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_change_a_key_encrypted_root_to_passphrase_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'change_key_options': { - 'passphrase': 'my_passphrase', - } - } - results = POST('/pool/dataset/change_key/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_pool_encrypted_root_key_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_dataset_changed_to_passphrase(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_lock_passphrase_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'lock_options': { - 'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_the_dataset_is_locked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is True, results.text - - -def test_verify_passphrase_encrypted_root_unlock_successful_is_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_passphrase_key_encrypted_datasets(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'my_passphrase' - } - ] - } 
- } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [dataset], str(job_status['results']) - - -def test_verify_pool_dataset_unlock_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_passphrase_key_encrypted_root_is_unlocked(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_delete_passphrase_key_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_an_dataset_with_inherit_encryption_from_the_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'inherit_encryption': True - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_inherit_encryption_from_the_key_encrypted_pool_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_an_encrypted_dataset_with_generate_key_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'generate_key': True, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_delete_generate_key_encrypted_dataset(request): - depends(request, ['CREATED_POOL']) - results = DELETE(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - - -def test_create_a_passphrase_encrypted_root_dataset_parrent(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_pool_passphrase_encrypted_root_dataset_parrent_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_create_a_passphrase_encrypted_root_child_of_passphrase_parent(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': child_dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase2', - }, - 
'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_encrypted_root_child_of_passphrase_parent_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase2" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_lock_passphrase_encrypted_root_with_is_child(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def test_verify_the_parrent_encrypted_root_unlock_successful_is_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_parrent_encrypted_root_dataset_is_locked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is True, results.text - - -def test_verify_the_chid_of_the_encrypted_root_parent_unlock_successful_is_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == child_dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_child_dataset_is_locked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is True, results.text - - -def test_try_to_unlock_the_child_of_lock_parent_encrypted_root(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': child_dataset, - 'passphrase': 'my_passphrase2' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'FAILED', str(job_status['results']) - assert f'{child_dataset} has locked parents' in str(job_status['results']), str(job_status['results']) - assert job_status['results']['result'] is None, str(job_status['results']) - - -def 
test_verify_child_of_lock_parent_encrypted_root_dataset_unlock_do_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase2" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_Verify_chid_unlock_successful_is_still_false(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == child_dataset: - assert dictionary['unlock_successful'] is False, str(job_status_result) - assert dictionary['locked'] is True, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_unlock_parent_dataset_with_child_recursively(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': dataset, - 'passphrase': 'my_passphrase' - }, - { - 'name': child_dataset, - 'passphrase': 'my_passphrase2' - } - ] - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [dataset, child_dataset], str(job_status['results']) - - -def test_verify_pool_dataset_unlock_with_child_dataset_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - cmd = """grep -R "my_passphrase2" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_parent_dataset_unlock_successful_is_true(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == dataset: - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_dataset_is_unlocked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is False, results.text - - -def test_verify_the_child_dataset_unlock_successful_is_true(request): - depends(request, ['CREATED_POOL']) - payload = { - 'id': child_dataset - } - results = POST('/pool/dataset/encryption_summary/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - job_status_result = 
job_status['results']['result'] - for dictionary in job_status_result: - if dictionary['name'] == child_dataset: - assert dictionary['unlock_successful'] is True, str(job_status_result) - assert dictionary['locked'] is False, str(job_status_result) - break - else: - assert False, str(job_status_result) - - -def test_verify_the_child_dataset_is_unlocked(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}/') - assert results.status_code == 200, results.text - assert results.json()['locked'] is False, results.text - - -def test_delete_dataset_with_is_child_recursive(request): - depends(request, ['CREATED_POOL']) - payload = { - "recursive": True, - } - results = DELETE(f'/pool/dataset/id/{dataset_url}/', payload) - assert results.status_code == 200, results.text - - -def test_creating_a_key_encrypted_dataset_on_key_encrypted_pool(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': dataset, - 'encryption_options': { - 'key': dataset_token_hex, - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_pool_encrypted_dataset_on_key_encrypted_pool_does_not_leak_encryption_key_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_create_a_passphrase_encrypted_root_from_key_encrypted_root(request): - depends(request, ['CREATED_POOL']) - payload = { - 'name': child_dataset, - 'encryption_options': { - 'passphrase': 'my_passphrase', - }, - 'encryption': True, - 'inherit_encryption': False - } - results = POST('/pool/dataset/', payload) - assert results.status_code == 200, results.text - - -def test_verify_ncrypted_root_from_key_encrypted_root_does_not_leak_passphrase_into_middleware_log(request): - cmd = """grep -R "my_passphrase" /var/log/middlewared.log""" - results = SSH_TEST(cmd, user, password) - assert results['result'] is False, str(results['output']) - - -def test_verify_the_new_passprase_encrypted_root_is_passphrase(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'PASSPHRASE', results.text - - -def test_run_inherit_parent_encryption_properties_on_the_passprase(request): - depends(request, ['CREATED_POOL']) - results = POST('/pool/dataset/inherit_parent_encryption_properties', child_dataset) - assert results.status_code == 200, results.text - - -def test_verify_the_the_child_got_props_by_the_parent_root(request): - depends(request, ['CREATED_POOL']) - results = GET(f'/pool/dataset/id/{child_dataset_url}') - assert results.status_code == 200, results.text - assert results.json()['key_format']['value'] == 'HEX', results.text - - -def test_delete_the_key_encrypted_pool_with_all_the_dataset(request): - depends(request, ['CREATED_POOL']) - payload = { - 'cascade': True, - 'restart_services': True, - 'destroy': True - } - results = POST(f'/pool/id/{pool_id}/export/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) diff --git a/tests/api2/test_426_smb_vss.py b/tests/api2/test_426_smb_vss.py index cbc7a431606a..2cb5b246ae60 100644 --- a/tests/api2/test_426_smb_vss.py 
+++ b/tests/api2/test_426_smb_vss.py @@ -16,6 +16,7 @@ from pytest_dependency import depends from protocols import SMB from samba import ntstatus +from middlewared.test.integration.utils import call from middlewared.test.integration.utils.client import truenas_server @@ -343,9 +344,7 @@ def test_051_disable_smb1(request): def test_052_stopping_smb_service(request): depends(request, ["VSS_SMB_SERVICE_STARTED"]) - payload = {"service": "cifs"} - results = POST("/service/stop/", payload) - assert results.status_code == 200, results.text + assert call("service.stop", "cifs") sleep(1) diff --git a/tests/api2/test_438_snapshots.py b/tests/api2/test_438_snapshots.py index aca2feddd1e4..af016b72aa39 100644 --- a/tests/api2/test_438_snapshots.py +++ b/tests/api2/test_438_snapshots.py @@ -1,13 +1,6 @@ -#!/usr/bin/env python3 -import os -import sys - from middlewared.test.integration.assets.pool import dataset, snapshot - -apifolder = os.getcwd() -sys.path.append(apifolder) from auto_config import pool_name -from functions import DELETE, GET, POST, PUT, wait_on_job +from middlewared.test.integration.utils import call def _verify_snapshot_keys_present(snap, expected, unexpected): @@ -81,20 +74,17 @@ def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, with dataset(dataset_name) as dataset_id: with snapshot(dataset_id, "snap01", get=True) as snap01_config: payload = { - 'query-filters': [['dataset', '=', dataset_id]], + 'query-filters': [["dataset", "=", dataset_id]], 'query-options': { 'extra': { 'properties': properties_list } } } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) # Check that we have one snap returned and that it has the expected # data - assert len(snaps) == 1, snaps + assert len(snaps) == 1 snap = snaps[0] _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys) _verify_snapshot_against_config(snap, dataset_id, snap01_config) @@ -104,13 +94,10 @@ def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, # Now create another snapshot and re-issue the query to check the # new results. with snapshot(dataset_id, "snap02", get=True) as snap02_config: - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) # Check that we have two snaps returned and that they have the expected # data. 
- assert len(snaps) == 2, snaps + assert len(snaps) == 2 # Need to sort the snaps by createtxg ssnaps = sorted(snaps, key=lambda d: int(d['createtxg'])) @@ -132,23 +119,17 @@ def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, with snapshot(dataset2, "snap03", get=True) as snap03_config: # First issue the original query again & ensure we still have # the expected snapshots - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 2, snaps + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) + assert len(snaps) == 2 for snap in snaps: assert snap['createtxg'] in existing_snaps, f"Got unexpected snap: {snap}" # Next issue the query with a different filter payload.update({ - 'query-filters': [['dataset', '=', dataset2]] + 'query-filters': [["dataset", "=", dataset2]] }) - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 1, snaps + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) + assert len(snaps) == 1 snap = snaps[0] assert snap['createtxg'] not in existing_snaps, f"Got unexpected snap: {snap}" new_snaps = {snap['createtxg']} @@ -159,13 +140,10 @@ def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, # Next issue the query with a bogus filter payload.update({ - 'query-filters': [['dataset', '=', f"{dataset_name}-BOGUS"]] + 'query-filters': [["dataset", "=", f"{dataset_name}-BOGUS"]] }) - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 0, snaps + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) + assert len(snaps) == 0 # Next issue the query WITHOUT a filter. It's possible # that this test could be run while other snapshots are @@ -174,22 +152,16 @@ def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list, payload.update({ 'query-filters': [] }) - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) >= 3, len(snaps) + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) + assert len(snaps) >= 3 all_snaps = set([s['createtxg'] for s in snaps]) assert existing_snaps.issubset(all_snaps), "Existing snaps not returned in filterless query" assert new_snaps.issubset(all_snaps), "New snaps not returned in filterless query" # Let the snap03 get cleaned up, and then ensure even with a filterless query # that it is no longer returned. 
- results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) >= 2, len(snaps) + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) + assert len(snaps) >= 2 all_snaps = set([s['createtxg'] for s in snaps]) assert existing_snaps.issubset(all_snaps), "Existing snaps not returned in filterless query" assert not new_snaps.issubset(all_snaps), "New snaps returned in filterless query" @@ -284,13 +256,10 @@ def _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, expe } } } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) # Check that we have one snap returned and that it has the expected # data - assert len(snaps) == 1, snaps + assert len(snaps) == 1 snap = snaps[0] _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys) _verify_snapshot_against_config(snap, dataset_id, snap01_config) @@ -306,13 +275,10 @@ def _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, expe } } } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) # Check that we have one snap returned and that it has the expected # data - assert len(snaps) == 1, snaps + assert len(snaps) == 1 snap = snaps[0] _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys) _verify_snapshot_against_config(snap, dataset_id, snap02_config) @@ -320,11 +286,8 @@ def _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, expe _verify_snapshot_properties(snap, properties_list) # Allow snap02 to be destroyed, then query again to make sure we don't get it - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() - assert len(snaps) == 0, snaps + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) + assert len(snaps) == 0 def _test_simple_snapshot_query_filter_snapshot(dataset_name, properties_list): @@ -414,23 +377,17 @@ def _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list, expected } } } - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) original_snap_count = len(snaps) with snapshot(dataset_id, "snap01", get=True) as snap01_config: with snapshot(dataset_id, "snap02", get=True) as snap02_config: # Query again - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) - # Check that we have two additional snap returned and that + # Check that we have two additional snap returned and that # they have the expected data - assert len(snaps) == original_snap_count+2, snaps + assert len(snaps) == original_snap_count+2 ssnaps = 
sorted(snaps, key=lambda d: int(d['createtxg'])) snap01 = ssnaps[-2] snap02 = ssnaps[-1] @@ -443,12 +400,9 @@ def _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list, expected _verify_snapshot_properties(snap02, properties_list) # Allow snap02 to be destroyed & query again. - results = GET(f"/zfs/snapshot", payload) - assert results.status_code == 200, results.text - assert isinstance(results.json(), list), results.text - snaps = results.json() + snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"]) - assert len(snaps) == original_snap_count+1, snaps + assert len(snaps) == original_snap_count+1 ssnaps = sorted(snaps, key=lambda d: int(d['createtxg'])) snap01 = ssnaps[-1] _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys) diff --git a/tests/api2/test_440_snmp.py b/tests/api2/test_440_snmp.py index eaba458e119d..4dc99d2d283c 100644 --- a/tests/api2/test_440_snmp.py +++ b/tests/api2/test_440_snmp.py @@ -358,7 +358,7 @@ def test_SNMPv3_user_retained_across_service_restart(self): reset_systemd_svcs("snmpd snmp-agent") res = call('service.stop', 'snmp') - assert res is False + assert res is True res = call('service.start', 'snmp') assert res is True res = call('snmp.get_snmp_users') diff --git a/tests/api2/test_450_staticroutes.py b/tests/api2/test_450_staticroutes.py deleted file mode 100644 index 7164a5432ed0..000000000000 --- a/tests/api2/test_450_staticroutes.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# License: BSD - - -import os -import pytest -import sys -from pytest_dependency import depends -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import DELETE, GET, POST, SSH_TEST -from auto_config import user, password -DESTINATION = '127.1.1.1' -GATEWAY = '127.0.0.1' - - -@pytest.fixture(scope='module') -def sr_dict(): - return {} - - -def test_01_creating_staticroute(sr_dict): - results = POST('/staticroute/', { - 'destination': DESTINATION, - 'gateway': GATEWAY, - 'description': 'test route', - }) - assert results.status_code == 200, results.text - sr_dict['newroute'] = results.json() - - -def test_02_check_staticroute_configured_using_api(sr_dict): - results = GET(f'/staticroute/?id={sr_dict["newroute"]["id"]}') - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, list), data - assert len(data) == 1, data - assert DESTINATION in data[0]['destination'], data - assert data[0]['gateway'] == GATEWAY, data - - -def test_03_checking_staticroute_configured_using_ssh(request): - results = SSH_TEST(f'netstat -4rn|grep -E ^{DESTINATION}', user, password) - assert results['result'] is True, results - assert results['stdout'].strip().split()[1] == GATEWAY, results - - -def test_04_delete_staticroute(sr_dict): - results = DELETE(f'/staticroute/id/{sr_dict["newroute"]["id"]}/') - assert results.status_code == 200, results.text - - -def test_05_check_staticroute_unconfigured_using_api(sr_dict): - results = GET(f'/staticroute/?destination={DESTINATION}') - assert results.status_code == 200, results.text - data = results.json() - assert isinstance(data, list), data - assert len(data) == 0, data - - -def test_06_checking_staticroute_unconfigured_using_ssh(request): - results = SSH_TEST(f'netstat -4rn|grep -E ^{DESTINATION}', user, password) - assert results['result'] is False, results diff --git a/tests/api2/test_470_system.py b/tests/api2/test_470_system.py deleted file mode 100644 index c88fef6c465e..000000000000 --- a/tests/api2/test_470_system.py +++ 
/dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python3 - -# Author: Eric Turgeon -# License: BSD -# Location for tests into REST API of FreeNAS - -import os -import sys - -apifolder = os.getcwd() -sys.path.append(apifolder) -from functions import GET -from middlewared.test.integration.utils import call - - -def test_01_check_if_system_is_ready_to_use(): - results = GET("/system/ready/") - assert results.json() is True, results.text - - -def test_02_checking_system_version(): - results = GET("/system/version/") - assert results.status_code == 200, results.text - assert type(results.json()) == str, results.text - - -def test_03_check_system_version_match_with_system_info(): - system_version = GET("/system/info/").json()['version'] - system_info_version = GET("/system/version/").json() - assert system_version == system_info_version - - -def test_04_check_system_product_type(): - results = GET("/system/product_type/") - assert results.status_code == 200, results.text - result = results.json() - assert isinstance(result, str), results.text - assert result in ('SCALE', 'SCALE_ENTERPRISE'), results.text - - -def test_05_check_system_debug(): - results = GET("/system/debug/") - assert results.status_code == 200, results.text - - -def test_06_check_system_set_time(): - """ - This test intentionally slews our clock to be off - by 300 seconds and then verifies that it got set - """ - results = GET("/system/info/") - assert results.status_code == 200, results.text - - # Convert to seconds - datetime = results.json()['datetime']['$date'] / 1000 - - # hop 300 seconds into the past - target = datetime - 300 - call('system.set_time', int(target)) - - results = GET("/system/info/") - assert results.status_code == 200, results.text - datetime2 = results.json()['datetime']['$date'] / 1000 - - # This is a fudge-factor because NTP will start working - # pretty quickly to correct the slew. 
- assert abs(target - datetime2) < 60 diff --git a/tests/api2/test_999_pool_dataset_unlock.py b/tests/api2/test_999_pool_dataset_unlock.py deleted file mode 100644 index 9c1944268308..000000000000 --- a/tests/api2/test_999_pool_dataset_unlock.py +++ /dev/null @@ -1,191 +0,0 @@ -import os -import sys -apifolder = os.getcwd() -sys.path.append(apifolder) - -import contextlib -import urllib.parse - -import pytest - -from auto_config import pool_name -from functions import POST, DELETE, wait_on_job -from middlewared.test.integration.assets.account import user -from middlewared.test.integration.utils import ssh -from protocols import SMB -from samba import ntstatus, NTSTATUSError - - -SMB_PASSWORD = 'Abcd1234' -SMB_USER = 'smbuser999' - - -def passphrase_encryption(): - return { - 'encryption_options': { - 'generate_key': False, - 'pbkdf2iters': 100000, - 'algorithm': 'AES-128-CCM', - 'passphrase': 'passphrase', - }, - 'encryption': True, - 'inherit_encryption': False, - } - - -@contextlib.contextmanager -def dataset(name, options=None): - assert "/" not in name - - dataset = f"{pool_name}/{name}" - - result = POST("/pool/dataset/", {"name": dataset, **(options or {})}) - assert result.status_code == 200, result.text - - result = POST("/filesystem/setperm/", {'path': f"/mnt/{dataset}", "mode": "777"}) - assert result.status_code == 200, result.text - job_status = wait_on_job(result.json(), 180) - assert job_status["state"] == "SUCCESS", str(job_status["results"]) - - try: - yield dataset - finally: - result = DELETE(f"/pool/dataset/id/{urllib.parse.quote(dataset, '')}/") - assert result.status_code == 200, result.text - - -@contextlib.contextmanager -def smb_share(name, path, options=None): - results = POST("/sharing/smb/", { - "name": name, - "path": path, - "guestok": True, - **(options or {}), - }) - assert results.status_code == 200, results.text - id = results.json()["id"] - - try: - yield id - finally: - result = DELETE(f"/sharing/smb/id/{id}/") - assert result.status_code == 200, result.text - - -def lock_dataset(name): - payload = { - 'id': name, - 'lock_options': { - 'force_umount': True - } - } - results = POST('/pool/dataset/lock', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - - -def unlock_dataset(name, options=None): - payload = { - 'id': name, - 'unlock_options': { - 'recursive': True, - 'datasets': [ - { - 'name': name, - 'passphrase': 'passphrase' - } - ], - **(options or {}), - } - } - results = POST('/pool/dataset/unlock/', payload) - assert results.status_code == 200, results.text - job_id = results.json() - job_status = wait_on_job(job_id, 120) - assert job_status['state'] == 'SUCCESS', str(job_status['results']) - assert job_status['results']['result']['unlocked'] == [name], str(job_status['results']) - - -@contextlib.contextmanager -def smb_connection(**kwargs): - c = SMB() - c.connect(**kwargs) - - try: - yield c - finally: - c.disconnect() - - -@pytest.fixture(scope='module') -def smb_user(): - with user({ - 'username': SMB_USER, - 'full_name': 'doug', - 'group_create': True, - 'password': SMB_PASSWORD, - 'smb': True - }, get_instance=True) as u: - yield u - - -@pytest.mark.dependency(name="create_dataset") -@pytest.mark.parametrize("toggle_attachments", [True, False]) -def test_pool_dataset_unlock_smb(smb_user, toggle_attachments): - # Prepare test SMB share - with dataset("normal") as normal: - with smb_share("normal", 
f"/mnt/{normal}"): - # Create an encrypted SMB share, unlocking which might lead to SMB service interruption - with dataset("encrypted", passphrase_encryption()) as encrypted: - with smb_share("encrypted", f"/mnt/{encrypted}"): - ssh(f"touch /mnt/{encrypted}/secret") - results = POST("/service/start/", {"service": "cifs"}) - assert results.status_code == 200, results.text - lock_dataset(encrypted) - # Mount test SMB share - with smb_connection( - share="normal", - username=SMB_USER, - password=SMB_PASSWORD - ) as normal_connection: - # Locked share should not be mountable - with pytest.raises(NTSTATUSError) as e: - with smb_connection( - share="encrypted", - username=SMB_USER, - password=SMB_PASSWORD - ): - pass - - assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME - - conn = normal_connection.show_connection() - assert conn['connected'], conn - unlock_dataset(encrypted, {"toggle_attachments": toggle_attachments}) - - conn = normal_connection.show_connection() - assert conn['connected'], conn - - if toggle_attachments: - # We should be able to mount encrypted share - with smb_connection( - share="encrypted", - username=SMB_USER, - password=SMB_PASSWORD - ) as encrypted_connection: - assert [x["name"] for x in encrypted_connection.ls("")] == ["secret"] - else: - # We should still not be able to mount encrypted share as we did not reload attachments - with pytest.raises(NTSTATUSError) as e: - with smb_connection( - share="encrypted", - username=SMB_USER, - password=SMB_PASSWORD - ): - pass - - assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME - results = POST("/service/stop/", {"service": "cifs"}) - assert results.status_code == 200, results.text diff --git a/tests/api2/test_account.py b/tests/api2/test_account.py index 829f892d7728..6a92eb1e977c 100644 --- a/tests/api2/test_account.py +++ b/tests/api2/test_account.py @@ -102,10 +102,22 @@ def test_delete_group_audit(): }) as g: with expect_audit_method_calls([{ "method": "group.delete", - "params": [g["id"], {}], + "params": [g["id"]], "description": "Delete group group2", }]): - call("group.delete", g["id"], {}) + call("group.delete", g["id"]) + + +def test_delete_group_audit_delete_users(): + with group({ + "name": "group2", + }) as g: + with expect_audit_method_calls([{ + "method": "group.delete", + "params": [g["id"], {"delete_users": True}], + "description": "Delete group group2 and all users that have this group as their primary group", + }]): + call("group.delete", g["id"], {"delete_users": True}) def test_update_account_using_api_key(): diff --git a/tests/api2/test_account_group.py b/tests/api2/test_account_group.py new file mode 100644 index 000000000000..55f40a8af407 --- /dev/null +++ b/tests/api2/test_account_group.py @@ -0,0 +1,37 @@ +import pytest + +from middlewared.service_exception import InstanceNotFound +from middlewared.test.integration.assets.account import user, group +from middlewared.test.integration.utils import call + + +def test_delete_group_delete_users(): + with group({ + "name": "group1", + }) as g: + with user({ + "username": "user1", + "full_name": "user1", + "group": g["id"], + "password": "test1234", + }) as u1: + with user({ + "username": "user2", + "full_name": "user2", + "group": g["id"], + "password": "test1234", + }) as u2: + with user({ + "username": "user3", + "full_name": "user3", + "group_create": True, + "groups": [g["id"]], + "password": "test1234", + }) as u3: + call("group.delete", g["id"], {"delete_users": True}) + + with pytest.raises(InstanceNotFound): + 
call("user.get_instance", u1["id"]) + with pytest.raises(InstanceNotFound): + call("user.get_instance", u2["id"]) + call("user.get_instance", u3["id"]) diff --git a/tests/api2/test_alert.py b/tests/api2/test_alert.py new file mode 100644 index 000000000000..8a79d39b4004 --- /dev/null +++ b/tests/api2/test_alert.py @@ -0,0 +1,74 @@ +from time import sleep + +import pytest + +from auto_config import pool_name +from middlewared.test.integration.utils import call, ssh + + +ID_PATH = "/dev/disk/by-partuuid/" + + +def get_alert_by_id(alert_id): + return next(filter(lambda alert: alert["id"] == alert_id, call("alert.list")), None) + + +def wait_for_alert(timeout=120): + for _ in range(timeout): + for alert in call("alert.list"): + if ( + alert["source"] == "VolumeStatus" and + alert["args"]["volume"] == pool_name and + alert["args"]["state"] == "DEGRADED" + ): + return alert["id"] + sleep(1) + + +@pytest.fixture(scope="module") +def degraded_pool_gptid(): + get_pool = call("pool.query", [["name", "=", pool_name]], {"get": True}) + gptid = get_pool["topology"]["data"][0]["path"].replace(ID_PATH, "") + ssh(f"zinject -d {gptid} -A fault {pool_name}") + return gptid + + +@pytest.fixture(scope="module") +def alert_id(degraded_pool_gptid): + call("alert.process_alerts") + result = wait_for_alert() + if result is None: + pytest.fail("Timed out while waiting for alert.") + return result + + +def test_verify_the_pool_is_degraded(degraded_pool_gptid): + status = call("zpool.status", {"name": pool_name}) + disk_status = status[pool_name]["data"][ID_PATH + degraded_pool_gptid]["disk_status"] + assert disk_status == "DEGRADED" + + +def test_dismiss_alert(alert_id): + call("alert.dismiss", alert_id) + alert = get_alert_by_id(alert_id) + assert alert["dismissed"] is True, alert + + +def test_restore_alert(alert_id): + call("alert.restore", alert_id) + alert = get_alert_by_id(alert_id) + assert alert["dismissed"] is False, alert + + +def test_clear_the_pool_degradation(degraded_pool_gptid): + ssh(f"zpool clear {pool_name}") + status = call("zpool.status", {"name": pool_name}) + disk_status = status[pool_name]["data"][ID_PATH + degraded_pool_gptid]["disk_status"] + assert disk_status != "DEGRADED" + + +@pytest.mark.timeout(120) +def test_wait_for_the_alert_to_disappear(alert_id): + call("alert.process_alerts") + while get_alert_by_id(alert_id) is not None: + sleep(1) diff --git a/tests/api2/test_audit_basic.py b/tests/api2/test_audit_basic.py index ec89b5dced2c..d76a5b87c8a9 100644 --- a/tests/api2/test_audit_basic.py +++ b/tests/api2/test_audit_basic.py @@ -4,6 +4,7 @@ from middlewared.test.integration.utils import call, url from middlewared.test.integration.utils.audit import get_audit_entry +from auto_config import ha from protocols import smb_connection from time import sleep @@ -43,7 +44,6 @@ class AUDIT_CONFIG(): } -# def get_zfs(key, zfs_config): def get_zfs(data_type, key, zfs_config): """ Get the equivalent ZFS value associated with the audit config setting """ @@ -64,10 +64,27 @@ def get_zfs(data_type, key, zfs_config): 'used_by_reservation': zfs_config['properties']['usedbyrefreservation']['parsed'] } } - # return zfs[key] return types[data_type][key] +def check_audit_download(report_path, report_type, tag=None): + """ Download audit DB (root user) + If requested, assert the tag is present + INPUT: report_type ['CSV'|'JSON'|'YAML'] + RETURN: lenght of content (bytes) + """ + job_id, url_path = call( + "core.download", "audit.download_report", + [{"report_name": os.path.basename(report_path)}], + 
f"report.{report_type.lower()}" + ) + r = requests.get(f"{url()}{url_path}") + r.raise_for_status() + if tag is not None: + assert f"{tag}" in r.text + return len(r.content) + + @pytest.fixture(scope='class') def initialize_for_smb_tests(): with dataset('audit-test-basic', data={'share_type': 'SMB'}) as ds: @@ -97,6 +114,32 @@ def init_audit(): call('audit.update', AUDIT_CONFIG.defaults) +@pytest.fixture(scope='class') +def standby_user(): + """ HA system: Create a user on the BACKUP node + This will generate a 'create' audit entry, yield, + and on exit generate a 'delete' audit entry. + """ + user_id = None + try: + name = "StandbyUser" + PASSWD + user_id = call( + 'failover.call_remote', 'user.create', [{ + "username": name, + "full_name": name + " Deleteme", + "group": 100, + "smb": False, + "home_create": False, + "password": "testing" + }], + {'raise_connect_error': False, 'timeout': 2, 'connect_timeout': 2} + ) + yield name + finally: + if user_id is not None: + call('failover.call_remote', 'user.delete', [user_id]) + + # ===================================================================== # Tests # ===================================================================== @@ -242,14 +285,8 @@ def test_audit_export(self): st = call('filesystem.stat', report_path) assert st['size'] != 0, str(st) - job_id, path = call( - "core.download", "audit.download_report", - [{"report_name": os.path.basename(report_path)}], - f"report.{backend.lower()}" - ) - r = requests.get(f"{url()}{path}") - r.raise_for_status() - assert len(r.content) == st['size'] + content_len = check_audit_download(report_path, backend) + assert content_len == st['size'] def test_audit_export_nonroot(self): with unprivileged_user_client(roles=['SYSTEM_AUDIT_READ', 'FILESYSTEM_ATTRS_READ']) as c: @@ -262,6 +299,7 @@ def test_audit_export_nonroot(self): st = c.call('filesystem.stat', report_path) assert st['size'] != 0, str(st) + # Make the call as the client job_id, path = c.call( "core.download", "audit.download_report", [{"report_name": os.path.basename(report_path)}], @@ -282,3 +320,51 @@ def test_audit_timestamps(self, svc): ae_ts_ts = int(audit_entry['timestamp'].timestamp()) ae_msg_ts = int(audit_entry['message_timestamp']) assert abs(ae_ts_ts - ae_msg_ts) < 2, f"$date='{ae_ts_ts}, message_timestamp={ae_msg_ts}" + + +@pytest.mark.skipif(not ha, reason="Skip HA tests") +class TestAuditOpsHA: + def test_audit_ha_query(self, standby_user): + name = standby_user + remote_user = call( + 'failover.call_remote', 'user.query', + [[["username", "=", name]]], + {'raise_connect_error': False, 'timeout': 2, 'connect_timeout': 2} + ) + assert remote_user != [] + + # Handle delays in the audit database + remote_audit_entry = [] + tries = 3 + while tries > 0 and remote_audit_entry == []: + sleep(1) + remote_audit_entry = call('audit.query', { + "query-filters": [["event_data.description", "$", name]], + "query-options": {"select": ["event_data", "success"]}, + "remote_controller": True + }) + if remote_audit_entry != []: + break + tries -= 1 + + assert tries > 0, "Failed to get expected audit entry" + assert remote_audit_entry != [] + params = remote_audit_entry[0]['event_data']['params'][0] + assert params['username'] == name + + def test_audit_ha_export(self, standby_user): + """ + Confirm we can download 'Active' and 'Standby' audit DB. + With a user created on the 'Standby' controller download the + audit DB from both controllers and confirm the user create is + in the 'Standby' audit DB and not in the 'Active' audit DB. 
+ """ + report_path_active = call('audit.export', {'export_format': 'CSV'}, job=True) + report_path_standby = call('audit.export', {'export_format': 'CSV', 'remote_controller': True}, job=True) + + # Confirm entry NOT in active controller audit DB + with pytest.raises(AssertionError): + check_audit_download(report_path_active, 'CSV', f"Create user {standby_user}") + + # Confirm entry IS in standby controller audit DB + check_audit_download(report_path_standby, 'CSV', f"Create user {standby_user}") diff --git a/tests/api2/test_audit_ftp.py b/tests/api2/test_audit_ftp.py index 14075d5c2753..f7705a45517c 100644 --- a/tests/api2/test_audit_ftp.py +++ b/tests/api2/test_audit_ftp.py @@ -1,26 +1,8 @@ -import os -import sys - -import pytest -from middlewared.test.integration.assets.pool import dataset from middlewared.test.integration.utils import call from middlewared.test.integration.utils.audit import expect_audit_method_calls -sys.path.append(os.getcwd()) -from functions import PUT - - -@pytest.fixture(scope='module') -def nfs_audit_dataset(request): - with dataset('audit-test-nfs') as ds: - try: - yield ds - finally: - pass - -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_ftp_config_audit(api): +def test_ftp_config_audit(): ''' Test the auditing of FTP configuration changes ''' @@ -36,23 +18,11 @@ def test_ftp_config_audit(api): 'params': [payload], 'description': 'Update FTP configuration', }]): - if api == 'ws': - call('ftp.update', payload) - elif api == 'rest': - result = PUT('/ftp/', payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) + call('ftp.update', payload) finally: # Restore initial state restore_payload = { 'clients': initial_ftp_config['clients'], 'banner': initial_ftp_config['banner'] } - if api == 'ws': - call('ftp.update', restore_payload) - elif api == 'rest': - result = PUT('/ftp/', restore_payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) + call('ftp.update', restore_payload) diff --git a/tests/api2/test_audit_nfs.py b/tests/api2/test_audit_nfs.py index 015ef133fa10..7ccdcce08b78 100644 --- a/tests/api2/test_audit_nfs.py +++ b/tests/api2/test_audit_nfs.py @@ -1,18 +1,8 @@ -import os -import sys - import pytest -from middlewared.service_exception import CallError from middlewared.test.integration.assets.pool import dataset from middlewared.test.integration.utils import call from middlewared.test.integration.utils.audit import expect_audit_method_calls -sys.path.append(os.getcwd()) -from functions import DELETE, POST, PUT - -REDACTED_SECRET = '********' - - @pytest.fixture(scope='module') def nfs_audit_dataset(request): with dataset('audit-test-nfs') as ds: @@ -22,8 +12,7 @@ def nfs_audit_dataset(request): pass -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_nfs_config_audit(api): +def test_nfs_config_audit(): ''' Test the auditing of NFS configuration changes ''' @@ -42,13 +31,7 @@ def test_nfs_config_audit(api): 'params': [payload], 'description': 'Update NFS configuration', }]): - if api == 'ws': - call('nfs.update', payload) - elif api == 'rest': - result = PUT('/nfs/', payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) + call('nfs.update', payload) finally: # Restore initial state restore_payload = { @@ -56,17 +39,10 @@ def test_nfs_config_audit(api): 'mountd_port': initial_nfs_config['mountd_port'], 'protocols': initial_nfs_config['protocols'] } - if api == 'ws': - call('nfs.update', restore_payload) - elif api == 'rest': - result = 
PUT('/nfs/', restore_payload) - assert result.status_code == 200, result.text - else: - raise ValueError(api) + call('nfs.update', restore_payload) -@pytest.mark.parametrize('api', ['ws', 'rest']) -def test_nfs_share_audit(api, nfs_audit_dataset): +def test_nfs_share_audit(nfs_audit_dataset): ''' Test the auditing of NFS share operations ''' @@ -83,14 +59,7 @@ def test_nfs_share_audit(api, nfs_audit_dataset): 'params': [payload], 'description': f'NFS share create {nfs_export_path}', }]): - if api == 'ws': - share_config = call('sharing.nfs.create', payload) - elif api == 'rest': - results = POST("/sharing/nfs/", payload) - assert results.status_code == 200, results.text - share_config = results.json() - else: - raise ValueError(api) + share_config = call('sharing.nfs.create', payload) # UPDATE payload = { "security": [] @@ -103,14 +72,7 @@ def test_nfs_share_audit(api, nfs_audit_dataset): ], 'description': f'NFS share update {nfs_export_path}', }]): - if api == 'ws': - share_config = call('sharing.nfs.update', share_config['id'], payload) - elif api == 'rest': - results = PUT(f"/sharing/nfs/id/{share_config['id']}/", payload) - assert results.status_code == 200, results.text - share_config = results.json() - else: - raise ValueError(api) + share_config = call('sharing.nfs.update', share_config['id'], payload) finally: if share_config is not None: # DELETE @@ -120,10 +82,4 @@ def test_nfs_share_audit(api, nfs_audit_dataset): 'params': [id_], 'description': f'NFS share delete {nfs_export_path}', }]): - if api == 'ws': - call('sharing.nfs.delete', id_) - elif api == 'rest': - result = DELETE(f'/sharing/nfs/id/{id_}') - assert result.status_code == 200, result.text - else: - raise ValueError(api) + call('sharing.nfs.delete', id_) diff --git a/tests/api2/test_core_download.py b/tests/api2/test_core_download.py new file mode 100644 index 000000000000..0db1df72d693 --- /dev/null +++ b/tests/api2/test_core_download.py @@ -0,0 +1,14 @@ +import requests + +from middlewared.test.integration.utils.client import truenas_server +from middlewared.test.integration.utils import call + + +def test_get_download_for_config_dot_save(): + # set up core download + job_id, url = call('core.download', 'config.save', [], 'freenas.db') + + # download from URL + rv = requests.get(f'http://{truenas_server.ip}{url}') + assert rv.status_code == 200 + assert len(rv.content) > 0 diff --git a/tests/api2/test_mail_admins.py b/tests/api2/test_mail_admins.py new file mode 100644 index 000000000000..3d9c8edaef30 --- /dev/null +++ b/tests/api2/test_mail_admins.py @@ -0,0 +1,37 @@ +import pytest + +from middlewared.test.integration.assets.account import user +from middlewared.test.integration.utils import call + +MAILUSER = 'wilbur' +MAILADDR = 'wilbur.spam@ixsystems.com' +NONMAIL_USER = 'wilburette' +NONMAIL_ADDR = 'wilburette.spam@ixsystems.com' +PASSWD = 'abcd1234' + + +@pytest.fixture(scope='module') +def full_admin_user(): + ba_id = call('group.query', [['gid', '=', 544]], {'get': True})['id'] + with user({ + 'username': NONMAIL_USER, + 'full_name': NONMAIL_USER, + 'group_create': True, + 'email': NONMAIL_ADDR, + 'password': PASSWD + }, get_instance=False): + with user({ + 'username': MAILUSER, + 'full_name': MAILUSER, + 'group_create': False, + 'email': MAILADDR, + 'group': ba_id, + 'password': PASSWD + }, get_instance=True) as u: + yield u + + +def test_mail_administrators(full_admin_user): + emails = call('mail.local_administrators_emails') + assert MAILADDR in emails + assert NONMAIL_ADDR not in emails diff --git 
a/tests/api2/test_pool_dataset_encryption.py b/tests/api2/test_pool_dataset_encryption.py new file mode 100644 index 000000000000..3c051be80d56 --- /dev/null +++ b/tests/api2/test_pool_dataset_encryption.py @@ -0,0 +1,406 @@ +import contextlib +import secrets + +import pytest + +from middlewared.service_exception import ValidationErrors +from middlewared.test.integration.assets.pool import another_pool +from middlewared.test.integration.utils import call, ssh +from truenas_api_client.exc import ClientException + + +# genrated token_hex 32bit for +pool_token_hex = secrets.token_hex(32) +dataset_token_hex = secrets.token_hex(32) + +encrypted_pool_name = 'test_encrypted' +dataset = f'{encrypted_pool_name}/encrypted' +child_dataset = f'{dataset}/child' +passphrase = 'my_passphrase' + + +def check_log_for(*phrases, should_find=False): + search_string = '|'.join(phrases) + cmd = f'grep -R -E "{search_string}" /var/log/middlewared.log' + results = ssh(cmd, check=False, complete_response=True) + assert results['result'] is should_find, str(results['output']) + + +def verify_lock_status(ds, *, locked): + job_status_result = call('pool.dataset.encryption_summary', ds, job=True) + for dictionary in job_status_result: + if dictionary['name'] == ds: + assert dictionary['unlock_successful'] is not locked, str(job_status_result) + assert dictionary['locked'] is locked, str(job_status_result) + break + else: + pytest.fail(str(job_status_result)) + + +@contextlib.contextmanager +def create_dataset(payload, **delete_args): + name = payload['name'] + yield call('pool.dataset.create', payload) + assert call('pool.dataset.delete', name, delete_args) + + +@pytest.fixture(scope='class') +def normal_pool(): + with another_pool({'name': encrypted_pool_name}): + yield + + +@pytest.fixture(scope='class') +def passphrase_pool(): + pool_passphrase = 'my_pool_passphrase' + with another_pool({ + 'name': encrypted_pool_name, + 'encryption': True, + 'encryption_options': { + 'algorithm': 'AES-128-CCM', + 'passphrase': pool_passphrase, + }, + }): + check_log_for(pool_passphrase) + ds = call('pool.dataset.get_instance', encrypted_pool_name) + assert ds['key_format']['value'] == 'PASSPHRASE', ds + assert ds['encryption_algorithm']['value'] == 'AES-128-CCM', ds + yield + + +@pytest.fixture(scope='class') +def key_pool(): + with another_pool({ + 'name': encrypted_pool_name, + 'encryption': True, + 'encryption_options': { + 'algorithm': 'AES-128-CCM', + 'key': pool_token_hex, + }, + }): + check_log_for(pool_token_hex) + ds = call('pool.dataset.get_instance', encrypted_pool_name) + assert ds['key_format']['value'] == 'HEX', ds + assert ds['encryption_algorithm']['value'] == 'AES-128-CCM', ds + yield + + +@pytest.mark.usefixtures('normal_pool') +class TestNormalPool: + + def test_passphrase_encrypted_root(self): + payload = { + 'name': dataset, + 'encryption_options': { + 'generate_key': False, + 'pbkdf2iters': 100000, + 'algorithm': 'AES-128-CCM', + 'passphrase': passphrase, + }, + 'encryption': True, + 'inherit_encryption': False + } + with create_dataset(payload) as ds: + assert ds['key_format']['value'] == 'PASSPHRASE' + check_log_for(passphrase) + + # Add a comment + call('pool.dataset.update', dataset, {'comments': 'testing encrypted dataset'}) + + # Change to key encryption + call('pool.dataset.change_key', dataset, {'key': dataset_token_hex}, job=True) + ds = call('pool.dataset.get_instance', dataset) + assert ds['key_format']['value'] == 'HEX' + + @pytest.mark.parametrize('payload', [ + {'encryption': False}, + 
{'inherit_encryption': True} + ]) + def test_dataset_not_encrypted(self, payload: dict): + payload['name'] = dataset + with create_dataset(payload) as ds: + assert ds['key_format']['value'] is None + + @pytest.mark.parametrize('payload, message', [ + ( + { + 'encryption_options': {'pbkdf2iters': 0}, + 'inherit_encryption': False + }, + 'Should be greater or equal than 100000' + ), + ( + { + 'encryption_options': {'passphrase': passphrase}, + 'inherit_encryption': True + }, + 'Must be disabled when encryption is enabled' + ), + ( + { + 'encryption_options': { + 'generate_key': True, + 'passphrase': passphrase, + }, + 'inherit_encryption': False + }, + 'Must be disabled when dataset is to be encrypted with passphrase' + ) + ]) + def test_try_to_create_invalid_encrypted_dataset(self, payload: dict, message: str): + payload.update({ + 'name': dataset, + 'encryption': True, + }) + with pytest.raises(ValidationErrors, match=message): + with create_dataset(payload): pass + + def test_invalid_encrypted_dataset_does_not_leak_passphrase_into_middleware_log(self): + check_log_for(passphrase) + + @pytest.mark.parametrize('payload', [ + {'encryption_options': {'generate_key': True}}, + {'encryption_options': {'key': dataset_token_hex}} + ]) + def test_encrypted_root_with_key_cannot_lock(self, payload: dict): + payload.update({ + 'name': dataset, + 'encryption': True, + 'inherit_encryption': False, + }) + with create_dataset(payload) as ds: + assert ds['key_format']['value'] == 'HEX' + check_log_for(dataset_token_hex) + + with pytest.raises(ClientException, match='Only datasets which are encrypted with passphrase can be locked'): + call('pool.dataset.lock', dataset, {'force_umount': True}, job=True) + + def test_encrypted_root_lock_unlock(self): + # Start with key-encrypted dataset + payload = { + 'name': dataset, + 'encryption': True, + 'inherit_encryption': False, + 'encryption_options': {'key': dataset_token_hex} + } + with create_dataset(payload): + # Change to a passphrase-encrypted dataset + call('pool.dataset.change_key', dataset, {'passphrase': passphrase}, job=True) + ds = call('pool.dataset.get_instance', dataset) + assert ds['key_format']['value'] == 'PASSPHRASE' + check_log_for(passphrase) + + # Lock it + assert call('pool.dataset.lock', dataset, {'force_umount': True}, job=True) + verify_lock_status(dataset, locked=True) + + # Attempt to unlock with incorrect passphrase + payload = { + 'recursive': True, + 'datasets': [{ + 'name': dataset, + 'passphrase': 'bad_passphrase' + }] + } + job_status = call('pool.dataset.unlock', dataset, payload, job=True) + assert job_status['failed'][dataset]['error'] == 'Invalid Key', job_status + verify_lock_status(dataset, locked=True) + + # Now actually unlock it + payload['datasets'][0]['passphrase'] = passphrase + job_status = call('pool.dataset.unlock', dataset, payload, job=True) + assert job_status['unlocked'] == [dataset], job_status + verify_lock_status(dataset, locked=False) + + +@pytest.mark.usefixtures('passphrase_pool') +class TestPassphraseEncryptedPool: + + def test_passphrase_encrypted_root_cannot_change_key(self): + payload = { + 'name': dataset, + 'encryption_options': { + 'generate_key': False, + 'pbkdf2iters': 100000, + 'algorithm': 'AES-128-CCM', + 'passphrase': passphrase, + }, + 'encryption': True, + 'inherit_encryption': False + } + with create_dataset(payload): + check_log_for(passphrase) + with pytest.raises(Exception, match=f'{dataset} has parent\\(s\\) which are encrypted with a passphrase'): + call('pool.dataset.change_key', 
dataset, {'key': dataset_token_hex}, job=True) + + def test_passphrase_encrypted_root_cannot_change_key_does_not_leak_passphrase_into_middleware_log(self): + check_log_for(passphrase) + + def test_create_dataset_to_inherit_encryption_from_passphrase_encrypted_pool(self): + payload = { + 'name': dataset, + 'inherit_encryption': True + } + with create_dataset(payload) as ds: + assert ds['key_format']['value'] == 'PASSPHRASE', ds + + @pytest.mark.parametrize('payload', [ + {'encryption_options': {'generate_key': True}}, + {'encryption_options': {'key': dataset_token_hex}}, + ]) + def test_try_to_create_invalid_encrypted_dataset(self, payload: dict): + payload.update({ + 'name': dataset, + 'encryption': True, + 'inherit_encryption': False + }) + with pytest.raises(ValidationErrors, match='Passphrase encrypted datasets cannot have children encrypted with a key'): + with create_dataset(payload): pass + + def test_try_to_create_invalid_encrypted_dataset_does_not_leak_encryption_key_into_middleware_log(self): + check_log_for(dataset_token_hex) + + +@pytest.mark.usefixtures('key_pool') +class TestKeyEncryptedPool: + + def test_key_encrypted_root(self): + # Start with key-encrypted dataset + payload = { + 'name': dataset, + 'encryption_options': {'key': dataset_token_hex}, + 'encryption': True, + 'inherit_encryption': False + } + with create_dataset(payload) as ds: + assert ds['key_format']['value'] == 'HEX', ds + check_log_for(dataset_token_hex) + + # Change to passphrase encryption + call('pool.dataset.change_key', dataset, {'passphrase': passphrase}, job=True) + check_log_for(passphrase) + ds = call('pool.dataset.get_instance', dataset) + assert ds['key_format']['value'] == 'PASSPHRASE', ds + + # Lock the dataset + assert call('pool.dataset.lock', dataset, {'force_umount': True}, job=True) + ds = call('pool.dataset.get_instance', dataset) + assert ds['locked'] is True, ds + verify_lock_status(dataset, locked=True) + + # Unlock the dataset + payload = { + 'recursive': True, + 'datasets': [{ + 'name': dataset, + 'passphrase': passphrase + }] + } + job_status = call('pool.dataset.unlock', dataset, payload, job=True) + assert job_status['unlocked'] == [dataset], job_status + check_log_for(passphrase) + verify_lock_status(dataset, locked=False) + + def test_dataset_with_inherit_encryption(self): + payload = { + 'name': dataset, + 'inherit_encryption': True + } + with create_dataset(payload) as ds: + assert ds['key_format']['value'] == 'HEX', ds + + def test_encrypted_dataset_with_generate_key(self): + payload = { + 'name': dataset, + 'encryption_options': {'generate_key': True}, + 'encryption': True, + 'inherit_encryption': False + } + with create_dataset(payload): pass + + def test_passphrase_encrypted_dataset_parent_child_lock_unlock(self): + payload = { + 'name': dataset, + 'encryption_options': {'passphrase': passphrase}, + 'encryption': True, + 'inherit_encryption': False + } + with create_dataset(payload, recursive=True): # Create parent dataset + check_log_for(passphrase) + + # Create child dataset + child_passphrase = 'my_passphrase2' + payload.update({ + 'name': child_dataset, + 'encryption_options': {'passphrase': child_passphrase}, + }) + call('pool.dataset.create', payload) + check_log_for(child_passphrase) + + # Lock parent (and child) + assert call('pool.dataset.lock', dataset, job=True) + for ds_name in (dataset, child_dataset): + ds = call('pool.dataset.get_instance', ds_name) + assert ds['locked'] is True, ds + verify_lock_status(ds_name, locked=True) + + # Try to unlock child + 
payload = { + 'recursive': True, + 'datasets': [{ + 'name': child_dataset, + 'passphrase': child_passphrase + }] + } + with pytest.raises(ClientException, match=f'{child_dataset} has locked parents {dataset} which must be unlocked first'): + call('pool.dataset.unlock', child_dataset, payload, job=True) + check_log_for(child_passphrase) + verify_lock_status(child_dataset, locked=True) + + # Unlock parent (and child) + payload = { + 'recursive': True, + 'datasets': [ + { + 'name': dataset, + 'passphrase': passphrase + }, + { + 'name': child_dataset, + 'passphrase': child_passphrase + } + ] + } + job_status = call('pool.dataset.unlock', dataset, payload, job=True) + assert job_status['unlocked'] == [dataset, child_dataset], job_status + check_log_for(passphrase, child_passphrase) + for ds_name in (dataset, child_dataset): + ds = call('pool.dataset.get_instance', ds_name) + assert ds['locked'] is False, ds + verify_lock_status(ds_name, locked=False) + + def test_key_encrypted_dataset(self): + # Create parent dataset + payload = { + 'name': dataset, + 'encryption_options': {'key': dataset_token_hex}, + 'encryption': True, + 'inherit_encryption': False + } + call('pool.dataset.create', payload) + check_log_for(dataset_token_hex) + + # Create child dataset + payload.update({ + 'name': child_dataset, + 'encryption_options': {'passphrase': passphrase}, + }) + call('pool.dataset.create', payload) + check_log_for(passphrase) + ds = call('pool.dataset.get_instance', child_dataset) + assert ds['key_format']['value'] == 'PASSPHRASE', ds + + # Inherit key encryption from parent + call('pool.dataset.inherit_parent_encryption_properties', child_dataset) + ds = call('pool.dataset.get_instance', child_dataset) + assert ds['key_format']['value'] == 'HEX', ds diff --git a/tests/api2/test_pool_dataset_unlock.py b/tests/api2/test_pool_dataset_unlock.py new file mode 100644 index 000000000000..9ba1eac7ca8b --- /dev/null +++ b/tests/api2/test_pool_dataset_unlock.py @@ -0,0 +1,131 @@ +import contextlib + +import pytest + +from middlewared.test.integration.assets.account import user +from middlewared.test.integration.assets.pool import dataset +from middlewared.test.integration.assets.smb import smb_share +from middlewared.test.integration.utils import call, ssh +from protocols import SMB +from samba import ntstatus, NTSTATUSError + + +SMB_PASSWORD = 'Abcd1234' +SMB_USER = 'smbuser999' + + +def passphrase_encryption(): + return { + 'encryption_options': { + 'generate_key': False, + 'pbkdf2iters': 100000, + 'algorithm': 'AES-128-CCM', + 'passphrase': 'passphrase', + }, + 'encryption': True, + 'inherit_encryption': False, + } + +def lock_dataset(name): + payload = { + 'force_umount': True + } + assert call('pool.dataset.lock', name, payload, job=True) + + +def unlock_dataset(name, options=None): + payload = { + 'recursive': True, + 'datasets': [ + { + 'name': name, + 'passphrase': 'passphrase' + } + ], + **(options or {}), + } + result = call('pool.dataset.unlock', name, payload, job=True) + assert result['unlocked'] == [name], str(result) + + +@contextlib.contextmanager +def smb_connection(**kwargs): + c = SMB() + c.connect(**kwargs) + + try: + yield c + finally: + c.disconnect() + + +@pytest.fixture(scope='module') +def smb_user(): + with user({ + 'username': SMB_USER, + 'full_name': 'doug', + 'group_create': True, + 'password': SMB_PASSWORD, + 'smb': True + }, get_instance=True) as u: + yield u + + +@pytest.mark.parametrize('toggle_attachments', [True, False]) +def test_pool_dataset_unlock_smb(smb_user, 
toggle_attachments): + with ( + # Prepare test SMB share + dataset('normal', mode='777') as normal, + smb_share(f'/mnt/{normal}', 'normal', {'guestok': True}), + # Create an encrypted SMB share, unlocking which might lead to SMB service interruption + dataset('encrypted', passphrase_encryption(), mode='777') as encrypted, + smb_share(f'/mnt/{encrypted}', 'encrypted', {'guestok': True}) + ): + ssh(f'touch /mnt/{encrypted}/secret') + assert call('service.start', 'cifs') + lock_dataset(encrypted) + # Mount test SMB share + with smb_connection( + share='normal', + username=SMB_USER, + password=SMB_PASSWORD + ) as normal_connection: + # Locked share should not be mountable + with pytest.raises(NTSTATUSError) as e: + with smb_connection( + share='encrypted', + username=SMB_USER, + password=SMB_PASSWORD + ): + pass + + assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME + + conn = normal_connection.show_connection() + assert conn['connected'], conn + unlock_dataset(encrypted, {'toggle_attachments': toggle_attachments}) + + conn = normal_connection.show_connection() + assert conn['connected'], conn + + if toggle_attachments: + # We should be able to mount encrypted share + with smb_connection( + share='encrypted', + username=SMB_USER, + password=SMB_PASSWORD + ) as encrypted_connection: + assert [x['name'] for x in encrypted_connection.ls('')] == ['secret'] + else: + # We should still not be able to mount encrypted share as we did not reload attachments + with pytest.raises(NTSTATUSError) as e: + with smb_connection( + share='encrypted', + username=SMB_USER, + password=SMB_PASSWORD + ): + pass + + assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME + + assert call('service.stop', 'cifs') diff --git a/tests/api2/test_staticroutes.py b/tests/api2/test_staticroutes.py new file mode 100644 index 000000000000..22e2e95429d2 --- /dev/null +++ b/tests/api2/test_staticroutes.py @@ -0,0 +1,43 @@ +import pytest + +from middlewared.test.integration.utils import call, ssh + +DESTINATION = '127.1.1.1' +GATEWAY = '127.0.0.1' + +@pytest.fixture(scope='module') +def sr_dict(): + return {} + + +def test_creating_staticroute(sr_dict): + sr_dict['newroute'] = call('staticroute.create', { + 'destination': DESTINATION, + 'gateway': GATEWAY, + 'description': 'test route', + }) + + +def test_check_staticroute_configured_using_api(sr_dict): + data = call('staticroute.query', [['id', '=', sr_dict['newroute']['id']]], {'get': True}) + assert DESTINATION in data['destination'] + assert data['gateway'] == GATEWAY + + +def test_checking_staticroute_configured_using_ssh(request): + results = ssh(f'netstat -4rn|grep -E ^{DESTINATION}', complete_response=True) + assert results['result'] is True + assert results['stdout'].strip().split()[1] == GATEWAY + + +def test_delete_staticroute(sr_dict): + call('staticroute.delete', sr_dict['newroute']['id']) + + +def test_check_staticroute_unconfigured_using_api(sr_dict): + assert call('staticroute.query', [['destination', '=', DESTINATION]]) == [] + + +def test_checking_staticroute_unconfigured_using_ssh(request): + results = ssh(f'netstat -4rn|grep -E ^{DESTINATION}', complete_response=True, check=False) + assert results['result'] is False diff --git a/tests/api2/test_system_general.py b/tests/api2/test_system_general.py index ec8d2dba4ba7..1c9b97a7755a 100644 --- a/tests/api2/test_system_general.py +++ b/tests/api2/test_system_general.py @@ -2,6 +2,27 @@ TIMEZONE = "America/New_York" +def test_check_system_set_time(): + """ + This test intentionally slews our clock to 
be off + by 300 seconds and then verifies that it got set + """ + results = call("system.info") + + # Convert to seconds + datetime = int(results["datetime"].timestamp()) + + # hop 300 seconds into the past + target = datetime - 300 + call("system.set_time", int(target)) + + results = call("system.info") + datetime2 = int(results["datetime"].timestamp()) + + # This is a fudge-factor because NTP will start working + # pretty quickly to correct the slew. + assert abs(target - datetime2) < 60 + def test_setting_timezone(): assert TIMEZONE in call("system.general.timezone_choices") diff --git a/tests/requirements.txt b/tests/requirements.txt index ed4bfea7700a..df6fa412bc82 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,5 +1,6 @@ boto3 dnspython +junitparser pytest pytest-dependency pytest-rerunfailures diff --git a/tests/run_unit_tests.py b/tests/run_unit_tests.py new file mode 100644 index 000000000000..ef4618692838 --- /dev/null +++ b/tests/run_unit_tests.py @@ -0,0 +1,162 @@ +# This script should be run locally from a TrueNAS VM. It runs all tests +# contained within the tests/unit directory as well as middleware specific unit +# tests contained within src/middlewared/middlewared/pytest/unit. +# +# NOTE: this requires `make install_tests` to have been run on the TrueNAS VM. + +import argparse +import middlewared +import os +import pytest +import sys + +from contextlib import contextmanager +from collections.abc import Generator +from dataclasses import dataclass +from junitparser import JUnitXml +from shutil import copytree, rmtree +from truenas_api_client import Client +from uuid import uuid4 + +DESCRIPTION = ( + 'Run unit tests from the specified middleware git repository on the ' + 'current TrueNAS server (version 25.04 or later). Exit code is one of ' + 'pytest exit codes with zero indicating success.' +) + +UNIT_TESTS = 'tests/unit' +MIDDLEWARE_MODULE_PATH = os.path.dirname(os.path.abspath(middlewared.__file__)) +MIDDLEWARE_PYTEST = 'src/middlewared/middlewared/pytest' +MIDDLEWARE_UNIT_TESTS = os.path.join(MIDDLEWARE_PYTEST, 'unit') +MIDDLEWARE_PYTEST_MODULE = os.path.join(MIDDLEWARE_MODULE_PATH, 'pytest') +RESULT_FILE = 'unit_tests_result.xml' +PYTEST_CONFTEST_FILE = 'tests/conftest.py' + + +@dataclass() +class UnitTestRun: + tests_dir: str + exit_code: pytest.ExitCode = pytest.ExitCode.NO_TESTS_COLLECTED + junit_file: str | None = None + + +def run_tests(data: UnitTestRun) -> UnitTestRun: + junit_file = f'unit_tests_result_{uuid4()}.xml' + + data.exit_code = pytest.main([ + '--disable-warnings', '-vv', + '-o', 'junit_family=xunit2', + '--junitxml', junit_file, + data.tests_dir + ]) + + if data.exit_code is not pytest.ExitCode.OK: + print( + f'{data.tests_dir}: tests failed with code: {data.exit_code}', + file=sys.stderr + ) + + data.junit_file = junit_file + return data + + +def run_unit_tests(repo_dir: str) -> pytest.ExitCode: + """ + Iterate through our unit test sources and create a unified junit xml file + for the overall test results. 
+ """ + xml_out = JUnitXml() + exit_code = pytest.ExitCode.NO_TESTS_COLLECTED + for test_dir in ( + os.path.join(repo_dir, UNIT_TESTS), + os.path.join(repo_dir, MIDDLEWARE_UNIT_TESTS), + ): + if not os.path.exists(test_dir): + raise FileNotFoundError(f'{test_dir}: unit test directory does not exist') + + data = run_tests(UnitTestRun(tests_dir=test_dir)) + xml_out += JUnitXml.fromfile(data.junit_file) + try: + os.remove(data.junit_file) + except Exception: + pass + + match data.exit_code: + case pytest.ExitCode.NO_TESTS_COLLECTED: + # We'll treat this as a partial failure because we still want our + # test results from other runs, but don't want an overall misleading + # result. + print( + f'{test_dir}: no tests collected. Treating as partial failure.', + file=sys.stderr + ) + if exit_code is pytest.ExitCode.OK: + exit_code = pytest.ExitCode.TESTS_FAILED + + case pytest.ExitCode.OK: + # If this is our first OK test run, set the exit code; + # otherwise preserve the existing one + if exit_code is pytest.ExitCode.NO_TESTS_COLLECTED: + exit_code = data.exit_code + + case _: + # exit codes are an IntEnum. Preserve the worst case + if exit_code < data.exit_code: + exit_code = data.exit_code + + xml_out.write(RESULT_FILE) + return exit_code + + +@contextmanager +def disable_api_test_config(path: str) -> Generator[None, None, None]: + """ prevent API tests conftest from being applied """ + os.rename( + os.path.join(path, PYTEST_CONFTEST_FILE), + os.path.join(path, f'{PYTEST_CONFTEST_FILE}.tmp') + ) + + try: + yield + finally: + os.rename( + os.path.join(path, f'{PYTEST_CONFTEST_FILE}.tmp'), + os.path.join(path, PYTEST_CONFTEST_FILE) + ) + + +@contextmanager +def setup_middleware_tests(path: str) -> Generator[None, None, None]: + """ temporarily set up our pytest tests in the python dir """ + try: + copytree( + os.path.join(path, MIDDLEWARE_PYTEST), + os.path.join(MIDDLEWARE_PYTEST_MODULE) + ) + yield + finally: + rmtree(MIDDLEWARE_PYTEST_MODULE) + + +def main() -> None: + parser = argparse.ArgumentParser(description=DESCRIPTION) + parser.add_argument( + '-p', '--path', + help='Path to local copy of middleware git repository', + default='./middleware' + ) + + # lazy check to verify we're on a TrueNAS server + with Client() as c: + assert c.call('system.ready') + + args = parser.parse_args() + with disable_api_test_config(args.path): + with setup_middleware_tests(args.path): + exit_code = run_unit_tests(args.path) + + sys.exit(exit_code) + + +if __name__ == '__main__': + main() diff --git a/tests/unit/test_pam_tdb.py b/tests/unit/test_pam_tdb.py new file mode 100644 index 000000000000..49ebbc4f301a --- /dev/null +++ b/tests/unit/test_pam_tdb.py @@ -0,0 +1,328 @@ +import os +import pam +import pwd +import pytest +import tdb + +from collections.abc import Generator +from contextlib import contextmanager +from middlewared.utils import crypto +from middlewared.utils import user_api_key +from time import monotonic + +EXPIRED_TS = 1 +BASE_ID = 1325 +LEGACY_ENTRY_KEY = 'rtpz6u16l42XJJGy5KMJOVfkiQH7CyitaoplXy7TqFTmY7zHqaPXuA1ob07B9bcB' +LEGACY_ENTRY_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI' +INVALID_HASH_TYPE = '$pbkdf2-canary$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI' +INVALID_SALT = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc0*g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI' +INVALID_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4a*I' +MISSING_SALT =
'$pbkdf2-sha256$29000$$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI' +MISSING_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$' +EMPTY_HASH_STRING = '' + +PAM_DIR = '/etc/pam.d' +PAM_FILE = 'middleware-api-key' +PAM_AUTH_LINE = 'auth [success=1 default=die] pam_tdb.so debug ' +PAM_FAIL_DELAY = 1.50 # pam_fail_delay is 2s, but we need a little wiggle-room + +PAM_FILE_REMAINING_CONTENTS = """ +@include common-auth-unix +@include common-account +password required pam_deny.so +session required pam_deny.so +""" + + +def write_tdb_file( + username: str, + hashlist: list[str], + expired: bool = False +) -> int: + """ + Generate a tdb file based on the specified parameters. + The resulting TDB will have one entry for `username` and + a varying number of hashes. + + Although each hash supports a separate expiry, we are only + concerned in these tests that expired hashes generate PAM_AUTH_ERR + as expected. + """ + + keys = [] + idx = 0 + + for idx, thehash in enumerate(hashlist): + keys.append(user_api_key.UserApiKey( + userhash=thehash, + dbid=BASE_ID + idx, + expiry=EXPIRED_TS if expired else 0 + )) + + entry = user_api_key.PamTdbEntry(username=username, keys=keys) + + user_api_key.flush_user_api_keys([entry]) + + return BASE_ID + idx + + +def truncate_tdb_file(username: str) -> None: + """ + Truncate the tdb entry so that the pascal string points past the end of the buffer. + If this results in PAM_AUTH_ERR then we need to look closely to make + sure we don't have parser issues in pam_tdb.c + """ + hdl = tdb.open(user_api_key.PAM_TDB_FILE) + try: + data = hdl.get(username.encode()) + hdl.store(username.encode(), data[0:len(data) - 5]) + finally: + hdl.close() + + +def make_tdb_garbage(username: str) -> None: + """ fill entry with non-api-key data """ + hdl = tdb.open(user_api_key.PAM_TDB_FILE) + try: + hdl.get(username.encode()) + hdl.store(username.encode(), b'meow') + finally: + hdl.close() + + +def make_null_tdb_entry(username: str) -> None: + """ throw some nulls into the mix for fun """ + hdl = tdb.open(user_api_key.PAM_TDB_FILE) + try: + hdl.get(username.encode()) + hdl.store(username.encode(), b'\x00' * 128) + finally: + hdl.close() + + +@contextmanager +def pam_service( + file_name: str = PAM_FILE, + admin_user: str | None = None, +) -> Generator[str, None, None]: + """ Create a pam service file for pam_tdb.so """ + auth_entry = PAM_AUTH_LINE + if admin_user: + auth_entry += f'truenas_admin={admin_user}' + + pam_service_path = os.path.join(PAM_DIR, file_name) + + with open(pam_service_path, 'w') as f: + f.write(auth_entry) + f.write(PAM_FILE_REMAINING_CONTENTS) + f.flush() + + try: + yield file_name + finally: + os.remove(pam_service_path) + + +@contextmanager +def fail_delay() -> Generator[None, None, None]: + """ + Assert that a failing authentication attempt does not finish faster than our + expected fail delay, which is an amount of time randomly distributed (by up to + 25%) about the longest value set. In our case, this is 2 seconds and so the delay + can be anywhere between 1.5 seconds and 2.5 seconds. We are only concerned in these + tests about the lower margin to ensure that we aren't immediately failing. If + we don't insert a failure delay then we may be vulnerable to + timing attacks on passwords.
+ """ + now = monotonic() + yield + elapsed = monotonic() - now + assert elapsed > PAM_FAIL_DELAY + + +@pytest.fixture(scope='module') +def current_username(): + """ for simplicity's sake we'll test against the current user """ + return pwd.getpwuid(os.geteuid()).pw_name + + +def test_unknown_user(current_username): + """ + A user without an entry in the file should fail with the appropriate error + and generate a fail delay + """ + db_id = write_tdb_file(current_username, [LEGACY_ENTRY_HASH]) + with pam_service(admin_user=current_username) as svc: + p = pam.pam() + with fail_delay(): + authd = p.authenticate('canary', f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc) + assert authd is False + assert p.code == pam.PAM_USER_UNKNOWN + + +def test_legacy_auth_admin(current_username): + """ This should succeed for the specified admin user """ + db_id = write_tdb_file(current_username, [LEGACY_ENTRY_HASH]) + with pam_service(admin_user=current_username) as svc: + p = pam.pam() + authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc) + assert authd is True + assert p.code == pam.PAM_SUCCESS + + with fail_delay(): + # an attempt to authenticate with an invalid key should trigger a fail delay + authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY[0:-1]}', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_legacy_auth_admin_expired_key(current_username): + """ Verify that an expired key results in PAM_AUTH_ERR """ + db_id = write_tdb_file(current_username, [LEGACY_ENTRY_HASH], True) + with pam_service(admin_user=current_username) as svc: + p = pam.pam() + authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_legacy_auth_non_admin(current_username): + """ Test that a legacy hash doesn't work for a non-admin user. + We really want to deprecate these legacy keys. + """ + write_tdb_file(current_username, [LEGACY_ENTRY_HASH]) + with pam_service() as svc: + with fail_delay(): + p = pam.pam() + authd = p.authenticate(current_username, LEGACY_ENTRY_KEY, service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_legacy_auth_multiple_entries(current_username): + """ verify the last entry in the hash list can be used to auth. + We allow multiple keys per user. Ensure that we can use more than the + first key.
+ """ + hashes = [crypto.generate_pbkdf2_512('canary') for i in range(0, 5)] + hashes.append(LEGACY_ENTRY_HASH) + + db_id = write_tdb_file(current_username, hashes) + with pam_service(admin_user=current_username) as svc: + p = pam.pam() + authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc) + assert authd is True + assert p.code == pam.PAM_SUCCESS + + +def test_new_auth(current_username): + """ verify that the new hash works as expected """ + key = crypto.generate_string(string_size=64) + db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)]) + + with pam_service() as svc: + p = pam.pam() + # verify that using the correct key succeeds + authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc) + assert authd is True + assert p.code == pam.PAM_SUCCESS + + # verify that using an incorrect key fails + with fail_delay(): + authd = p.authenticate(current_username, f'{db_id}-{key[0:-1]}', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_new_auth_truncated_password(current_username): + """ Verify that a truncated password generates an auth error """ + key = crypto.generate_string(string_size=64) + db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)]) + + with pam_service() as svc: + p = pam.pam() + with fail_delay(): + authd = p.authenticate(current_username, f'{db_id}-', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_new_auth_multi(current_username): + """ verify that the second key works with the newer hash """ + key = crypto.generate_string(string_size=64) + db_id = write_tdb_file(current_username, [ + LEGACY_ENTRY_HASH, + crypto.generate_pbkdf2_512(key) + ]) + with pam_service() as svc: + p = pam.pam() + # verify that using the correct key succeeds + authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc) + assert authd is True + assert p.code == pam.PAM_SUCCESS + + # verify that using an incorrect key fails + with fail_delay(): + authd = p.authenticate(current_username, f'{db_id}-{key[0:-1]}', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_new_auth_timeout(current_username): + """ verify that a valid but expired key denies auth with the expected error code """ + key = crypto.generate_string(string_size=64) + db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)], True) + with pam_service() as svc: + p = pam.pam() + with fail_delay(): + authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTH_ERR + + +def test_unsupported_service_file_name(current_username): + """ pam_tdb has a strict check that it can't be used for other services """ + key = crypto.generate_string(string_size=64) + db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)]) + with pam_service(file_name='canary') as svc: + p = pam.pam() + # even a correct key must be rejected under an unsupported service name + authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc) + assert authd is False + assert p.code == pam.PAM_SYSTEM_ERR + + +@pytest.mark.parametrize('thehash,pam_error', [ + (INVALID_HASH_TYPE, pam.PAM_AUTH_ERR), + (INVALID_SALT, pam.PAM_AUTH_ERR), + (INVALID_HASH, pam.PAM_AUTH_ERR), + (MISSING_SALT, pam.PAM_AUTH_ERR), + (MISSING_HASH, pam.PAM_AUTH_ERR), + (EMPTY_HASH_STRING, pam.PAM_AUTHINFO_UNAVAIL), +]) +def test_invalid_hash(current_username, thehash, pam_error): + """ Check that variations of broken hash entries generate the expected error """
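+ # All of the malformed hashes above must fail authentication; only the + # empty hash string is reported as PAM_AUTHINFO_UNAVAIL rather than PAM_AUTH_ERR.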
+ db_id = write_tdb_file(current_username, [thehash]) + with pam_service(admin_user=current_username) as svc: + p = pam.pam() + # even the correct legacy key must fail against a broken hash entry + authd = p.authenticate(current_username, f'{db_id}-{LEGACY_ENTRY_KEY}', service=svc) + assert authd is False + assert p.code == pam_error + + +@pytest.mark.parametrize('fuzz_fn', [ + truncate_tdb_file, + make_tdb_garbage, + make_null_tdb_entry, +]) +def test_invalid_tdb_data(current_username, fuzz_fn): + """ verify we detect a garbage tdb entry and flag it for reinit """ + key = crypto.generate_string(string_size=64) + db_id = write_tdb_file(current_username, [crypto.generate_pbkdf2_512(key)], True) + fuzz_fn(current_username) + with pam_service() as svc: + p = pam.pam() + authd = p.authenticate(current_username, f'{db_id}-{key}', service=svc) + assert authd is False + assert p.code == pam.PAM_AUTHINFO_UNAVAIL
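The tests above exercise the full pam_tdb key lifecycle: hashed keys are written to the tdb file with user_api_key.flush_user_api_keys() and then presented to pam_tdb.so as a password of the form "<dbid>-<plaintext key>". A minimal sketch of that flow, using only the helpers already imported in test_pam_tdb.py, is shown below; it is not part of the patch, and the username, the dbid of 10, and the reliance on an existing 'middleware-api-key' PAM service file (the tests create one via pam_service()) are illustrative assumptions.

import pam
from middlewared.utils import crypto, user_api_key

# Store only the PBKDF2 hash of the plaintext key in the pam_tdb database.
plaintext = crypto.generate_string(string_size=64)
entry = user_api_key.PamTdbEntry(
    username='root',  # illustrative; the tests above use the current user
    keys=[user_api_key.UserApiKey(
        userhash=crypto.generate_pbkdf2_512(plaintext),
        dbid=10,   # illustrative id; it becomes the "<dbid>-" prefix below
        expiry=0,  # the tests above use 0 for keys that are not expired
    )],
)
user_api_key.flush_user_api_keys([entry])

# pam_tdb.so expects the password as "<dbid>-<plaintext>" for the given service.
p = pam.pam()
assert p.authenticate('root', f'10-{plaintext}', service='middleware-api-key')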