diff --git a/data_safe_haven/infrastructure/programs/declarative_sre.py b/data_safe_haven/infrastructure/programs/declarative_sre.py
index 9648f6ac3a..2228078c36 100644
--- a/data_safe_haven/infrastructure/programs/declarative_sre.py
+++ b/data_safe_haven/infrastructure/programs/declarative_sre.py
@@ -198,20 +198,6 @@ def __call__(self) -> None:
             tags=self.tags,
         )
 
-        # Deploy monitoring
-        monitoring = SREMonitoringComponent(
-            "sre_monitoring",
-            self.stack_name,
-            SREMonitoringProps(
-                dns_private_zones=dns.private_zones,
-                location=self.config.azure.location,
-                resource_group_name=resource_group.name,
-                subnet=networking.subnet_monitoring,
-                timezone=self.config.sre.timezone,
-            ),
-            tags=self.tags,
-        )
-
         # Deploy data storage
         data = SREDataComponent(
             "sre_data",
diff --git a/data_safe_haven/infrastructure/programs/sre/data.py b/data_safe_haven/infrastructure/programs/sre/data.py
index 645ea8fd87..825861c122 100644
--- a/data_safe_haven/infrastructure/programs/sre/data.py
+++ b/data_safe_haven/infrastructure/programs/sre/data.py
@@ -7,6 +7,7 @@
 from pulumi import ComponentResource, Input, Output, ResourceOptions
 from pulumi_azure_native import (
     authorization,
+    insights,
     keyvault,
     managedidentity,
     network,
@@ -425,6 +426,45 @@ def __init__(
                 resource_group_name=kwargs["resource_group_name"],
             )
         )
+        # Add diagnostic setting for files
+        insights.DiagnosticSetting(
+            f"{storage_account_data_configuration._name}_diagnostic_setting",
+            name=f"{storage_account_data_configuration._name}_diagnostic_setting",
+            log_analytics_destination_type="Dedicated",
+            logs=[
+                {
+                    "category_group": "allLogs",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+                {
+                    "category_group": "audit",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+            ],
+            metrics=[
+                {
+                    "category": "Transaction",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                }
+            ],
+            # This is the URI of the automatically created fileService resource
+            resource_uri=Output.concat(
+                storage_account_data_configuration.id, "/fileServices/default"
+            ),
+            workspace_id=props.log_analytics_workspace.id,
+        )
         # Set up a private endpoint for the configuration data storage account
         storage_account_data_configuration_private_endpoint = network.PrivateEndpoint(
             f"{storage_account_data_configuration._name}_private_endpoint",
@@ -625,6 +665,45 @@ def __init__(
             opts=child_opts,
             tags=child_tags,
         )
+        # Add diagnostic setting for files
+        insights.DiagnosticSetting(
+            f"{storage_account_data_private_user._name}_diagnostic_setting",
+            name=f"{storage_account_data_private_user._name}_diagnostic_setting",
+            log_analytics_destination_type="Dedicated",
+            logs=[
+                {
+                    "category_group": "allLogs",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+                {
+                    "category_group": "audit",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+            ],
+            metrics=[
+                {
+                    "category": "Transaction",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                }
+            ],
+            # This is the URI of the automatically created fileService resource
+            resource_uri=Output.concat(
+                storage_account_data_private_user.id, "/fileServices/default"
+            ),
+            workspace_id=props.log_analytics_workspace.id,
+        )
         storage.FileShare(
             f"{storage_account_data_private_user._name}_files_home",
             access_tier=storage.ShareAccessTier.PREMIUM,
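Review note: the two `insights.DiagnosticSetting` blocks added above are identical apart from the storage account they target. A minimal sketch of how they could be factored into a shared helper is shown below; the helper name `add_file_service_diagnostics` is hypothetical and not part of this diff, and all parameter values are taken directly from the blocks above.

```python
from pulumi import Output
from pulumi_azure_native import insights, storage


def add_file_service_diagnostics(
    storage_account: storage.StorageAccount,
    workspace_id: Output[str],
) -> insights.DiagnosticSetting:
    """Send file share logs and metrics for a storage account to Log Analytics."""
    # Retention is handled by the Log Analytics workspace, so disable it here
    no_retention = {"days": 0, "enabled": False}
    return insights.DiagnosticSetting(
        f"{storage_account._name}_diagnostic_setting",
        name=f"{storage_account._name}_diagnostic_setting",
        log_analytics_destination_type="Dedicated",
        logs=[
            {"category_group": "allLogs", "enabled": True, "retention_policy": no_retention},
            {"category_group": "audit", "enabled": True, "retention_policy": no_retention},
        ],
        metrics=[
            {"category": "Transaction", "enabled": True, "retention_policy": no_retention},
        ],
        # Target the fileService child resource created automatically with the account
        resource_uri=Output.concat(storage_account.id, "/fileServices/default"),
        workspace_id=workspace_id,
    )
```

Each call site would then reduce to a single line, e.g. `add_file_service_diagnostics(storage_account_data_configuration, props.log_analytics_workspace.id)`.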
diff --git a/docs/source/management/logs.md b/docs/source/management/logs.md
index d7607cf98a..10b9bfb0e5 100644
--- a/docs/source/management/logs.md
+++ b/docs/source/management/logs.md
@@ -43,6 +43,35 @@ There are two tables,
 : Various metrics on blob container utilisation and performance.
 : This table is not reserved for the desired state data container and other resources may log to it.
 
+### User data logs
+
+The user data file share holds the {ref}`researchers'` [home directories](https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s08.html), where they will store their personal data and configuration.
+Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
+
+`StorageFileLogs`
+: NFS events occurring on the file share.
+: For example, data being written or directories being accessed.
+
+`AzureMetrics`
+: Various metrics on file share utilisation and performance.
+: This table is not reserved for the user data share and other resources may log to it.
+
+### Configuration data logs
+
+There are multiple configuration data file shares.
+Each contains the configuration and state data for the Data Safe Haven [services deployed as containers](#container-logs).
+Logs from these shares are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
+
+`StorageFileLogs`
+: SMB events occurring on the file shares.
+: For example, data being written or directories being accessed.
+
+`AzureMetrics`
+: Various metrics on file share utilisation and performance.
+: This table is not reserved for the configuration data shares and other resources may log to it.
+
 ## Container logs
 
 Some of the Data Safe Haven infrastructure is provisioned as containers.
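Review note: once the diagnostic settings above are deployed, the new `StorageFileLogs` entries can be inspected in the log analytics workspace through the Azure portal or programmatically. The sketch below is an illustration only, assuming the `azure-monitor-query` and `azure-identity` packages (which are not part of the Data Safe Haven codebase) and a placeholder workspace GUID.

```python
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

# Placeholder: substitute the SRE's Log Analytics workspace GUID
WORKSPACE_ID = "00000000-0000-0000-0000-000000000000"

client = LogsQueryClient(DefaultAzureCredential())

# Recent file events from the user data and configuration shares
response = client.query_workspace(
    workspace_id=WORKSPACE_ID,
    query=(
        "StorageFileLogs"
        " | where TimeGenerated > ago(1d)"
        " | project TimeGenerated, OperationName, Uri, StatusText"
        " | order by TimeGenerated desc"
    ),
    timespan=timedelta(days=1),
)

for table in response.tables:
    for row in table.rows:
        print(row)
```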