From 8068166963db9b763b346a26bc79910541e4c90d Mon Sep 17 00:00:00 2001 From: canton-machine <48923836+canton-machine@users.noreply.github.com> Date: Wed, 26 Jun 2024 08:41:17 +0200 Subject: [PATCH] [main] Update 2024-06-25.23 (#188) Reference commit: c6ea53392f Co-authored-by: Canton --- .../participant/v30/inspection_service.proto | 2 - .../v30/traffic_control_service.proto | 6 +- .../version/ProtocolVersionAnnotation.scala | 3 + .../client/commands/LedgerApiCommands.scala | 37 ++ .../commands/ParticipantAdminCommands.scala | 25 +- .../canton/config/CantonConfig.scala | 20 +- .../config/CommunityConfigValidations.scala | 20 +- .../canton/console/InstanceReference.scala | 4 +- .../commands/LedgerApiAdministration.scala | 103 ++-- .../commands/ParticipantAdministration.scala | 3 + .../commands/PartiesAdministration.scala | 4 +- .../commands/TopologyAdministration.scala | 57 ++- .../canton/environment/Environment.scala | 1 + .../canton/metrics/MetricsRegistry.scala | 16 +- .../digitalasset/canton/CantonAppDriver.scala | 3 +- .../beta-version-support.conf | 9 + .../canton/environment/NodesTest.scala | 1 + .../tests/release/CliIntegrationTest.scala | 2 +- .../v30/traffic_control_parameters.proto | 10 +- .../com/digitalasset/canton/data/Offset.scala | 23 + .../environment/CantonNodeParameters.scala | 3 + .../lifecycle/FutureUnlessShutdown.scala | 7 + .../canton/protocol/DomainParameters.scala | 14 +- .../messages/RootHashMessageRecipients.scala | 81 ++-- ...playingSendsSequencerClientTransport.scala | 35 +- .../protocol/SubmissionRequest.scala | 9 +- .../sequencing/protocol/TrafficState.scala | 35 +- .../sequencing/traffic/TrafficConsumed.scala | 35 +- .../traffic/TrafficConsumedManager.scala | 3 +- .../topology/TopologyManagerError.scala | 62 ++- .../topology/TopologyStateProcessor.scala | 14 +- .../IdentityProvidingServiceClient.scala | 9 + .../processing/AuthorizationGraph.scala | 282 +++++------ ...ogyTransactionAuthorizationValidator.scala | 289 ++++++------ .../TransactionAuthorizationValidator.scala | 42 +- .../store/TopologyTransactionRejection.scala | 61 ++- .../transaction/TopologyMapping.scala | 78 +++- .../transaction/TopologyMappingChecks.scala | 317 +++++++++---- .../canton/tracing/TracedScaffeine.scala | 30 +- .../canton/version/HasVersionedWrapper.scala | 5 +- .../canton/version/ProtocolVersion.scala | 68 ++- .../ledger/javaapi/data/PackageVersion.java | 65 +++ .../javaapi/data/PackageVersionSpec.scala | 43 ++ .../src/main/daml/CantonExamples/daml.yaml | 2 +- .../canton/h2/stable/V1_1__initial.sql | 6 +- .../canton/h2/stable/V2__lapi_3.0.sha256 | 2 +- .../canton/h2/stable/V2__lapi_3.0.sql | 3 +- .../canton/postgres/stable/V1_1__initial.sql | 6 +- .../stable/V1_2__initial_views.sha256 | 2 +- .../postgres/stable/V1_2__initial_views.sql | 3 +- .../postgres/stable/V2_0__lapi_3.0.sha256 | 2 +- .../canton/postgres/stable/V2_0__lapi_3.0.sql | 1 + .../canton/config/ProtocolConfig.scala | 1 + .../TransactionConfirmationRequest.scala | 34 +- .../topology/QueueBasedDomainOutbox.scala | 2 +- .../topology/StoreBasedDomainOutbox.scala | 6 +- .../ProtocolVersionCompatibility.scala | 51 +- .../ReleaseVersionToProtocolVersions.scala | 56 ++- .../protocol/RecipientsTreeTest.scala | 12 +- .../canton/data/GeneratorsTrafficData.scala | 2 + .../messages/TopologyTransactionTest.scala | 6 +- .../client/SequencerClientTest.scala | 3 + .../IdentityProvidingServiceClientTest.scala | 5 + .../StoreBasedDomainTopologyClientTest.scala | 4 +- .../processing/AuthorizationGraphTest.scala | 63 ++- 
...lizedNamespaceAuthorizationGraphTest.scala | 44 +- ...ransactionAuthorizationValidatorTest.scala | 440 +++++++++++++----- .../TopologyTransactionTestFactory.scala | 37 +- .../store/TopologyStoreTestData.scala | 4 +- .../transaction/GeneratorsTransaction.scala | 41 ++ .../ValidatingTopologyMappingChecksTest.scala | 204 ++++---- .../canton/tracing/TracedScaffeineTest.scala | 18 +- .../HasProtocolVersionedWrapperTest.scala | 10 +- .../canton/version/ProtocolVersionTest.scala | 1 - .../demo/src/main/daml/ai-analysis/daml.yaml | 2 +- community/demo/src/main/daml/bank/daml.yaml | 2 +- community/demo/src/main/daml/doctor/daml.yaml | 2 +- .../src/main/daml/health-insurance/daml.yaml | 2 +- .../src/main/daml/medical-records/daml.yaml | 2 +- .../block/BlockSequencerStateManager.scala | 38 +- .../block/update/BlockChunkProcessor.scala | 1 + .../update/SubmissionRequestValidator.scala | 35 +- .../update/TrafficControlValidator.scala | 4 +- .../config/DomainParametersConfig.scala | 1 + ...irmationRequestAndResponseProcessor.scala} | 6 +- .../canton/domain/mediator/Mediator.scala | 2 +- .../domain/mediator/MediatorEvent.scala | 2 +- .../mediator/MediatorEventsProcessor.scala | 4 +- .../canton/domain/mediator/MediatorNode.scala | 3 +- .../domain/mediator/VerdictSender.scala | 2 +- .../config/SequencerNodeParameterConfig.scala | 1 + .../sequencer/DatabaseSequencer.scala | 36 +- .../sequencing/sequencer/Sequencer.scala | 4 +- .../sequencer/SequencerReader.scala | 98 ++-- .../sequencer/block/BlockSequencer.scala | 47 +- .../block/BlockSequencerFactory.scala | 1 - .../sequencer/store/DbSequencerStore.scala | 6 +- .../store/InMemorySequencerStore.scala | 3 + .../sequencer/store/SequencerStore.scala | 16 +- .../EnterpriseSequencerRateLimitManager.scala | 12 +- .../store/db/DbTrafficConsumedStore.scala | 30 +- ...tionRequestAndResponseProcessorTest.scala} | 52 ++- .../DatabaseSequencerSnapshottingTest.scala | 24 +- .../sequencer/SequencerApiTest.scala | 80 +++- .../sequencer/SequencerReaderTest.scala | 1 + .../sequencer/store/SequencerStoreTest.scala | 63 ++- .../GrpcSequencerIntegrationTest.scala | 1 + ...erpriseSequencerRateLimitManagerTest.scala | 55 ++- .../store/TrafficConsumedStoreTest.scala | 20 +- .../IntegrationTestUtilities.scala | 3 +- .../v2/admin/command_inspection_service.proto | 73 +++ .../ledger/api/v2/experimental_features.proto | 6 + ...ommandInspectionServiceAuthorization.scala | 36 ++ .../CommandServiceAuthorization.scala | 8 +- ...ommandSubmissionServiceAuthorization.scala | 2 +- .../services/CommandInspectionService.scala | 18 + ...andInspectionServiceRequestValidator.scala | 26 ++ .../api/validation/CommandsValidator.scala | 4 - .../participant/state/CompletionInfo.scala | 6 +- .../participant/state/ReadService.scala | 74 +-- .../participant/state/SubmitterInfo.scala | 4 +- .../participant/state/WriteService.scala | 80 +++- .../state/metrics/TimedReadService.scala | 70 +-- .../state/metrics/TimedWriteService.scala | 64 +++ .../canton/platform/InMemoryState.scala | 4 + .../canton/platform/LedgerApiServer.scala | 3 + .../canton/platform/ResourceCloseable.scala | 46 ++ .../platform/apiserver/ApiServiceOwner.scala | 10 +- .../platform/apiserver/ApiServices.scala | 221 ++++----- .../platform/apiserver/LedgerFeatures.scala | 6 +- .../execution/CommandProgressTracker.scala | 228 +++++++++ .../StoreBackedCommandExecutor.scala | 6 +- .../ApiCommandSubmissionService.scala | 36 +- .../services/ApiPackageService.scala | 10 +- .../apiserver/services/ApiStateService.scala | 8 +- 
.../apiserver/services/ApiUpdateService.scala | 88 ++-- .../services/ApiVersionService.scala | 3 +- .../admin/ApiCommandInspectionService.scala | 67 +++ .../admin/ApiPackageManagementService.scala | 18 +- .../admin/ApiParticipantPruningService.scala | 15 +- .../CommandInspectionServiceImpl.scala | 58 +++ .../platform/index/InMemoryStateUpdater.scala | 15 +- .../indexer/IndexerServiceOwner.scala | 6 +- .../canton/platform/indexer/JdbcIndexer.scala | 2 + .../BatchingParallelIngestionPipe.scala | 11 +- .../ParallelIndexerSubscription.scala | 91 ++-- .../canton/platform/package.scala | 18 +- .../store/CompletionFromTransaction.scala | 6 +- .../platform/store/FlywayMigrations.scala | 4 +- .../store/backend/UpdateToMeteringDbDto.scala | 15 +- .../common/CommonStorageBackendFactory.scala | 3 - .../CompletionStorageBackendTemplate.scala | 2 +- .../backend/common/EventReaderQueries.scala | 2 +- .../common/EventStorageBackendTemplate.scala | 2 +- .../common/MeteringStorageBackendImpl.scala | 2 +- .../common/ParameterStorageBackendImpl.scala | 4 +- .../common/PartyStorageBackendTemplate.scala | 2 +- .../store/backend/common/QueryStrategy.scala | 4 + ...ctorOf.scala => SimpleSqlExtensions.scala} | 7 +- .../StringInterningStorageBackendImpl.scala | 2 +- .../common/TransactionPointwiseQueries.scala | 2 +- .../common/TransactionStreamingQueries.scala | 2 +- .../backend/h2/H2EventStorageBackend.scala | 9 +- .../backend/h2/H2StorageBackendFactory.scala | 7 + .../IdentityProviderStorageBackendImpl.scala | 2 +- .../ParticipantMetadataBackend.scala | 2 +- .../PartyRecordStorageBackendImpl.scala | 4 +- .../UserManagementStorageBackendImpl.scala | 2 +- .../oracle/OracleEventStorageBackend.scala | 9 +- .../oracle/OracleStorageBackendFactory.scala | 6 + .../PostgresEventStorageBackend.scala | 9 +- .../postgresql/PostgresQueryStrategy.scala | 7 + .../PostgresStorageBackendFactory.scala | 4 + .../TransactionLogUpdatesConversions.scala | 4 +- .../interfaces/TransactionLogUpdate.scala | 2 +- .../canton/platform/InMemoryStateSpec.scala | 3 + .../canton/platform/IndexComponentTest.scala | 3 + .../StoreBackedCommandExecutorSpec.scala | 6 +- .../ApiCommandSubmissionServiceSpec.scala | 2 + .../ApiPackageManagementServiceSpec.scala | 67 ++- .../index/InMemoryStateUpdaterSpec.scala | 8 +- .../RecoveringIndexerIntegrationSpec.scala | 3 + .../indexer/ha/EndlessReadService.scala | 4 +- .../ha/IndexerStabilityTestFixture.scala | 3 + .../BatchingParallelIngestionPipeSpec.scala | 20 +- .../ParallelIndexerSubscriptionSpec.scala | 1 - .../store/backend/PruningDtoQueries.scala | 2 +- .../store/backend/StorageBackendSpec.scala | 2 - .../store/backend/UpdateToDbDtoSpec.scala | 1 - .../backend/UpdateToMeteringDbDtoSpec.scala | 57 ++- .../cache/InMemoryFanoutBufferSpec.scala | 4 +- .../store/dao/BufferedStreamsReaderSpec.scala | 5 +- .../BufferedTransactionByIdReaderSpec.scala | 4 +- .../store/dao/JdbcLedgerDaoBackend.scala | 5 +- .../dao/JdbcLedgerDaoCompletionsSpec.scala | 4 - .../store/dao/JdbcLedgerDaoSuite.scala | 10 +- .../indexerbenchmark/IndexerBenchmark.scala | 5 +- .../src/main/daml/carbonv1/daml.yaml | 2 +- .../src/main/daml/carbonv2/daml.yaml | 2 +- .../src/main/daml/experimental/daml.yaml | 2 +- .../src/main/daml/model/daml.yaml | 2 +- .../main/daml/package_management/daml.yaml | 2 +- .../main/daml/semantic/DivulgenceTests.daml | 13 + .../src/main/daml/semantic/daml.yaml | 2 +- .../src/main/daml/upgrade/1.0.0/daml.yaml | 2 +- .../src/main/daml/upgrade/2.0.0/daml.yaml | 2 +- .../src/main/daml/upgrade/3.0.0/daml.yaml | 2 +- 
.../canton/http/json2/JsVersionService.scala | 1 + .../src/test/daml/v2_1/daml.yaml | 2 +- .../src/test/daml/v2_dev/daml.yaml | 2 +- community/participant/src/main/daml/daml.yaml | 2 +- .../CantonLedgerApiServerFactory.scala | 5 +- .../canton/participant/GlobalOffset.scala | 10 + .../canton/participant/ParticipantNode.scala | 50 +- .../ParticipantNodeParameters.scala | 5 + .../admin/grpc/GrpcInspectionService.scala | 24 +- .../inspection/SyncStateInspection.scala | 35 +- .../config/LocalParticipantConfig.scala | 7 + .../api/CantonLedgerApiServerWrapper.scala | 36 +- .../StartableStoppableLedgerApiServer.scala | 28 +- .../protocol/MessageDispatcher.scala | 5 +- .../protocol/TransactionProcessingSteps.scala | 22 +- .../protocol/TransactionProcessor.scala | 3 + .../transfer/TransferInProcessingSteps.scala | 1 - .../transfer/TransferOutProcessingSteps.scala | 1 - .../transfer/TransferProcessingSteps.scala | 2 - .../validation/RecipientsValidator.scala | 6 +- .../store/MultiDomainEventLog.scala | 7 +- .../store/SerializableLedgerSyncEvent.scala | 6 - .../store/db/DbMultiDomainEventLog.scala | 21 +- .../memory/InMemoryMultiDomainEventLog.scala | 16 +- .../participant/sync/CantonSyncService.scala | 63 ++- .../sync/CommandProgressTrackerImpl.scala | 353 ++++++++++++++ .../canton/participant/sync/SyncDomain.scala | 10 +- .../ParticipantTopologyDispatcher.scala | 4 +- .../canton/participant/util/DAMLe.scala | 21 +- .../DefaultParticipantStateValues.scala | 2 - .../admin/GrpcTrafficControlServiceTest.scala | 1 + .../admin/PackageServiceTest.scala | 2 +- .../protocol/MessageDispatcherTest.scala | 90 +++- .../protocol/ProtocolProcessorTest.scala | 1 - .../TopologyTransactionsToEventsTest.scala | 2 +- .../TransactionProcessingStepsTest.scala | 2 + .../protocol/transfer/DAMLeTestInstance.scala | 2 +- .../sync/CantonSyncServiceTest.scala | 69 +-- .../LedgerServerPartyNotifierTest.scala | 4 +- .../topology/QueueBasedDomainOutboxTest.scala | 2 +- .../topology/StoreBasedDomainOutboxTest.scala | 2 +- .../canton/MockedNodeParameters.scala | 2 + .../canton/ProtocolVersionChecks.scala | 14 +- .../topology/TestingIdentityFactory.scala | 21 +- .../telemetry/OpenTelemetryFactory.scala | 28 +- project/BuildCommon.scala | 3 +- project/Dependencies.scala | 4 +- project/project/DamlVersions.scala | 2 +- 255 files changed, 4433 insertions(+), 2053 deletions(-) create mode 100644 community/app/src/test/resources/documentation-snippets/beta-version-support.conf create mode 100644 community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/PackageVersion.java create mode 100644 community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/PackageVersionSpec.scala rename community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/{ConfirmationResponseProcessor.scala => ConfirmationRequestAndResponseProcessor.scala} (99%) rename community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/{ConfirmationResponseProcessorTest.scala => ConfirmationRequestAndResponseProcessorTest.scala} (97%) create mode 100644 community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/command_inspection_service.proto create mode 100644 community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandInspectionServiceAuthorization.scala create mode 100644 community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandInspectionService.scala create mode 100644 
community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandInspectionServiceRequestValidator.scala create mode 100644 community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/ResourceCloseable.scala create mode 100644 community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/CommandProgressTracker.scala create mode 100644 community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiCommandInspectionService.scala create mode 100644 community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandInspectionServiceImpl.scala rename community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/{SimpleSqlAsVectorOf.scala => SimpleSqlExtensions.scala} (85%) create mode 100644 community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CommandProgressTrackerImpl.scala diff --git a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/inspection_service.proto b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/inspection_service.proto index 161cf9f12..123588f8a 100644 --- a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/inspection_service.proto +++ b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/inspection_service.proto @@ -18,8 +18,6 @@ service InspectionService { // Lookup the domain where a contract is currently active. // Supports querying many contracts at once. rpc LookupContractDomain(LookupContractDomain.Request) returns (LookupContractDomain.Response); - // Lookup the domain that the transaction was committed over. Can fail with NOT_FOUND if no domain was found. - rpc LookupTransactionDomain(LookupTransactionDomain.Request) returns (LookupTransactionDomain.Response); // Look up the ledger offset corresponding to the timestamp, specifically the largest offset such that no later // offset corresponds to a later timestamp than the specified one. 
rpc LookupOffsetByTime(LookupOffsetByTime.Request) returns (LookupOffsetByTime.Response); diff --git a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/traffic_control_service.proto b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/traffic_control_service.proto index 0e54114e0..1a8011101 100644 --- a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/traffic_control_service.proto +++ b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/traffic_control_service.proto @@ -31,8 +31,10 @@ message TrafficState { int64 extra_traffic_consumed = 2; // Amount of base traffic remaining int64 base_traffic_remainder = 3; + // Cost deducted at `timestamp`; set to 0 when no traffic was consumed at `timestamp` + uint64 last_consumed_cost = 4; // Timestamp at which the state is valid - int64 timestamp = 4; + int64 timestamp = 5; // Optional serial of the balance update that updated the extra traffic limit - google.protobuf.UInt32Value serial = 5; + google.protobuf.UInt32Value serial = 6; } diff --git a/community/admin-api/src/main/protobuf/com/digitalasset/canton/version/ProtocolVersionAnnotation.scala b/community/admin-api/src/main/protobuf/com/digitalasset/canton/version/ProtocolVersionAnnotation.scala index d87593584..6fcf0b445 100644 --- a/community/admin-api/src/main/protobuf/com/digitalasset/canton/version/ProtocolVersionAnnotation.scala +++ b/community/admin-api/src/main/protobuf/com/digitalasset/canton/version/ProtocolVersionAnnotation.scala @@ -10,6 +10,9 @@ object ProtocolVersionAnnotation { /** Marker for stable protocol versions */ sealed trait Stable extends Status + + /** Marker for beta protocol versions */ + sealed trait Beta extends Status } /** Marker trait for Protobuf messages generated by scalapb diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala index 6b9f80000..d9f4b0b2b 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/LedgerApiCommands.scala @@ -4,6 +4,14 @@ package com.digitalasset.canton.admin.api.client.commands import cats.syntax.either.* +import cats.syntax.traverse.* +import com.daml.ledger.api.v2.admin.command_inspection_service.CommandInspectionServiceGrpc.CommandInspectionServiceStub +import com.daml.ledger.api.v2.admin.command_inspection_service.{ + CommandInspectionServiceGrpc, + CommandState, + GetCommandStatusRequest, + GetCommandStatusResponse, +} import com.daml.ledger.api.v2.admin.identity_provider_config_service.IdentityProviderConfigServiceGrpc.IdentityProviderConfigServiceStub import com.daml.ledger.api.v2.admin.identity_provider_config_service.* import com.daml.ledger.api.v2.admin.metering_report_service.MeteringReportServiceGrpc.MeteringReportServiceStub @@ -135,6 +143,7 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.networking.grpc.ForwardingStreamObserver +import com.digitalasset.canton.platform.apiserver.execution.CommandStatus import
com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.topology.{DomainId, PartyId} @@ -352,6 +361,34 @@ object LedgerApiCommands { } } + + object CommandInspectionService { + abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { + override type Svc = CommandInspectionServiceStub + + override def createService(channel: ManagedChannel): CommandInspectionServiceStub = + CommandInspectionServiceGrpc.stub(channel) + } + + final case class GetCommandStatus(commandIdPrefix: String, state: CommandState, limit: Int) + extends BaseCommand[GetCommandStatusRequest, GetCommandStatusResponse, Seq[CommandStatus]] { + override def createRequest(): Either[String, GetCommandStatusRequest] = Right( + GetCommandStatusRequest(commandIdPrefix = commandIdPrefix, state = state, limit = limit) + ) + + override def submitRequest( + service: CommandInspectionServiceStub, + request: GetCommandStatusRequest, + ): Future[GetCommandStatusResponse] = service.getCommandStatus(request) + + override def handleResponse( + response: GetCommandStatusResponse + ): Either[String, Seq[CommandStatus]] = { + response.commandStatus.traverse(CommandStatus.fromProto).leftMap(_.message) + } + } + } + object ParticipantPruningService { abstract class BaseCommand[Req, Resp, Res] extends GrpcAdminCommand[Req, Resp, Res] { override type Svc = ParticipantPruningServiceStub diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala index 70fd79ce1..d7032b55d 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala @@ -57,7 +57,7 @@ import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.BinaryFileUtil import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{DomainAlias, LedgerTransactionId, SequencerCounter, config} +import com.digitalasset.canton.{DomainAlias, SequencerCounter, config} import com.google.protobuf.ByteString import com.google.protobuf.empty.Empty import com.google.protobuf.timestamp.Timestamp @@ -1057,29 +1057,6 @@ object ParticipantAdminCommands { } - final case class LookupTransactionDomain(transactionId: LedgerTransactionId) - extends Base[ - v30.LookupTransactionDomain.Request, - v30.LookupTransactionDomain.Response, - DomainId, - ] { - override def createRequest() = Right(v30.LookupTransactionDomain.Request(transactionId)) - - override def submitRequest( - service: InspectionServiceStub, - request: v30.LookupTransactionDomain.Request, - ): Future[v30.LookupTransactionDomain.Response] = - service.lookupTransactionDomain(request) - - override def handleResponse( - response: v30.LookupTransactionDomain.Response - ): Either[String, DomainId] = - DomainId.fromString(response.domainId) - - override def timeoutType: TimeoutType = DefaultUnboundedTimeout - - } - final case class LookupOffsetByTime(ts: Timestamp) extends Base[v30.LookupOffsetByTime.Request, v30.LookupOffsetByTime.Response, String] { override def createRequest() = Right(v30.LookupOffsetByTime.Request(Some(ts))) diff --git 
a/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala index df01fec8a..fd9e10e83 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -70,6 +70,7 @@ import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.admin.AdminWorkflowConfig import com.digitalasset.canton.participant.config.ParticipantInitConfig.ParticipantLedgerApiInitConfig import com.digitalasset.canton.participant.config.* +import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig import com.digitalasset.canton.platform.apiserver.SeedService.Seeding import com.digitalasset.canton.platform.apiserver.configuration.{ EngineLoggingConfig, @@ -238,6 +239,7 @@ final case class RetentionPeriodDefaults( * @param startupParallelism Start up to N nodes in parallel (default is num-threads) * @param nonStandardConfig don't fail config validation on non-standard configuration settings * @param devVersionSupport If true, allow domain nodes to use unstable protocol versions and participant nodes to connect to such domains + * @param betaVersionSupport If true, allow domain nodes to use beta protocol versions and participant nodes to connect to such domains * @param timeouts Sets the timeouts used for processing and console * @param portsFile A ports file name, where the ports of all participants will be written to after startup * @param exitOnFatalFailures If true the node will exit/stop the process in case of fatal failures @@ -251,6 +253,7 @@ final case class CantonParameters( nonStandardConfig: Boolean = true, // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version devVersionSupport: Boolean = true, + betaVersionSupport: Boolean = false, portsFile: Option[String] = None, timeouts: TimeoutSettings = TimeoutSettings(), retentionPeriodDefaults: RetentionPeriodDefaults = RetentionPeriodDefaults(), @@ -378,6 +381,7 @@ trait CantonConfig { protocolConfig = ParticipantProtocolConfig( minimumProtocolVersion = participantParameters.minimumProtocolVersion.map(_.unwrap), devVersionSupport = participantParameters.devVersionSupport, + betaVersionSupport = participantParameters.betaVersionSupport, dontWarnOnDeprecatedPV = participantParameters.dontWarnOnDeprecatedPV, ), ledgerApiServerParameters = participantParameters.ledgerApiServer, @@ -388,6 +392,7 @@ trait CantonConfig { disableUpgradeValidation = participantParameters.disableUpgradeValidation, allowForUnauthenticatedContractIds = participantParameters.allowForUnauthenticatedContractIds, + commandProgressTracking = participantParameters.commandProgressTracker, ) } @@ -519,6 +524,7 @@ private[canton] object CantonNodeParameterConverter { def protocol(parent: CantonConfig, config: ProtocolConfig): CantonNodeParameters.Protocol = CantonNodeParameters.Protocol.Impl( devVersionSupport = parent.parameters.devVersionSupport || config.devVersionSupport, + betaVersionSupport = parent.parameters.betaVersionSupport || config.betaVersionSupport, dontWarnOnDeprecatedPV = config.dontWarnOnDeprecatedPV, ) @@ -971,9 +977,12 @@ object CantonConfig { deriveReader[EngineLoggingConfig] lazy implicit val cantonEngineConfigReader: ConfigReader[CantonEngineConfig] = deriveReader[CantonEngineConfig] - lazy implicit val participantNodeParameterConfigReader - :
ConfigReader[ParticipantNodeParameterConfig] = + @nowarn("cat=unused") lazy implicit val participantNodeParameterConfigReader + : ConfigReader[ParticipantNodeParameterConfig] = { + implicit val commandProgressTrackerConfigReader: ConfigReader[CommandProgressTrackerConfig] = + deriveReader[CommandProgressTrackerConfig] deriveReader[ParticipantNodeParameterConfig] + } lazy implicit val timeTrackerConfigReader: ConfigReader[DomainTimeTrackerConfig] = deriveReader[DomainTimeTrackerConfig] lazy implicit val timeRequestConfigReader: ConfigReader[TimeProofRequestConfig] = @@ -1385,9 +1394,12 @@ object CantonConfig { deriveWriter[EngineLoggingConfig] lazy implicit val cantonEngineConfigWriter: ConfigWriter[CantonEngineConfig] = deriveWriter[CantonEngineConfig] - lazy implicit val participantNodeParameterConfigWriter - : ConfigWriter[ParticipantNodeParameterConfig] = + @nowarn("cat=unused") lazy implicit val participantNodeParameterConfigWriter + : ConfigWriter[ParticipantNodeParameterConfig] = { + implicit val commandProgressTrackerConfigWriter: ConfigWriter[CommandProgressTrackerConfig] = + deriveWriter[CommandProgressTrackerConfig] deriveWriter[ParticipantNodeParameterConfig] + } lazy implicit val timeTrackerConfigWriter: ConfigWriter[DomainTimeTrackerConfig] = deriveWriter[DomainTimeTrackerConfig] lazy implicit val timeRequestConfigWriter: ConfigWriter[TimeProofRequestConfig] = diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala b/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala index 89c9dccdb..6ffe75196 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala @@ -9,7 +9,6 @@ import cats.syntax.functor.* import cats.syntax.functorFilter.* import com.daml.nonempty.NonEmpty import com.daml.nonempty.catsinstances.* -import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.version.HandshakeErrors.DeprecatedProtocolVersion @@ -53,19 +52,17 @@ object CommunityConfigValidations type Validation = CantonCommunityConfig => Validated[NonEmpty[Seq[String]], Unit] override protected val validations: List[Validation] = - List[Validation](noDuplicateStorage, atLeastOneNode) ++ genericValidations[ - CantonCommunityConfig - ] + List[Validation](noDuplicateStorage, atLeastOneNode) ++ + genericValidations[CantonCommunityConfig] /** Validations applied to all community and enterprise Canton configurations. */ private[config] def genericValidations[C <: CantonConfig] - : List[C => Validated[NonEmpty[Seq[String]], Unit]] = { + : List[C => Validated[NonEmpty[Seq[String]], Unit]] = List( developmentProtocolSafetyCheck, warnIfUnsafeMinProtocolVersion, adminTokenSafetyCheckParticipants, ) - } /** Group node configs by db access to find matching db storage configs. 
* Overcomplicated types used are to work around that at this point nodes could have conflicting names so we can't just @@ -207,19 +204,22 @@ object CommunityConfigValidations devVersionSupport = nodeConfig.parameters.devVersionSupport, ) } + } private def warnIfUnsafeMinProtocolVersion( config: CantonConfig ): Validated[NonEmpty[Seq[String]], Unit] = { - config.participants.toSeq.foreach { case (name, config) => + val errors = config.participants.toSeq.mapFilter { case (name, config) => val minimum = config.parameters.minimumProtocolVersion.map(_.unwrap) val isMinimumDeprecatedVersion = minimum.getOrElse(ProtocolVersion.minimum).isDeprecated - if (isMinimumDeprecatedVersion && !config.parameters.dontWarnOnDeprecatedPV) - DeprecatedProtocolVersion.WarnParticipant(name, minimum).discard + Option.when(isMinimumDeprecatedVersion && !config.parameters.dontWarnOnDeprecatedPV)( + DeprecatedProtocolVersion.WarnParticipant(name, minimum).cause + ) } - Validated.valid(()) + + NonEmpty.from(errors).map(Validated.invalid).getOrElse(Validated.valid(())) } private def adminTokenSafetyCheckParticipants( diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala index 358006d1f..c662fd14f 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala @@ -401,9 +401,6 @@ class ExternalLedgerApiClient( override val loggerFactory: NamedLoggerFactory = consoleEnvironment.environment.loggerFactory.append("client", name) - override protected def domainOfTransaction(transactionId: String): DomainId = - throw new NotImplementedError("domain_of is not implemented for external ledger api clients") - override protected[console] def ledgerApiCommand[Result]( command: GrpcAdminCommand[?, ?, Result] ): ConsoleCommandResult[Result] = @@ -413,6 +410,7 @@ class ExternalLedgerApiClient( override protected def optionallyAwait[Tx]( tx: Tx, txId: String, + txDomainId: String, optTimeout: Option[NonNegativeDuration], ): Tx = tx diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala index a4b53c4e4..aefbd7dec 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala @@ -8,6 +8,7 @@ import cats.syntax.functorFilter.* import cats.syntax.traverse.* import com.daml.jwt.JwtDecoder import com.daml.jwt.domain.Jwt +import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState import com.daml.ledger.api.v2.admin.package_management_service.PackageDetails import com.daml.ledger.api.v2.admin.party_management_service.PartyDetails as ProtoPartyDetails import com.daml.ledger.api.v2.checkpoint.Checkpoint @@ -42,6 +43,7 @@ import com.daml.ledger.javaapi as javab import com.daml.lf.data.Ref import com.daml.metrics.api.MetricsContext import com.daml.scalautil.Statement.discard +import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.CompletionWrapper import com.digitalasset.canton.admin.api.client.commands.LedgerApiCommands.UpdateService.* 
import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.{ @@ -49,10 +51,6 @@ import com.digitalasset.canton.admin.api.client.commands.LedgerApiTypeWrappers.{ WrappedIncompleteAssigned, WrappedIncompleteUnassigned, } -import com.digitalasset.canton.admin.api.client.commands.{ - LedgerApiCommands, - ParticipantAdminCommands, -} import com.digitalasset.canton.admin.api.client.data.* import com.digitalasset.canton.config.ConsoleCommandTimeout import com.digitalasset.canton.config.RequireTypes.PositiveInt @@ -83,11 +81,12 @@ import com.digitalasset.canton.ledger.client.services.admin.IdentityProviderConf import com.digitalasset.canton.logging.NamedLogging import com.digitalasset.canton.networking.grpc.{GrpcError, RecordingStreamObserver} import com.digitalasset.canton.participant.ledger.api.client.JavaDecodeUtil +import com.digitalasset.canton.platform.apiserver.execution.CommandStatus import com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId} import com.digitalasset.canton.tracing.NoTracing import com.digitalasset.canton.util.ResourceUtil -import com.digitalasset.canton.{LedgerTransactionId, LfPackageId, LfPartyId, config} +import com.digitalasset.canton.{LfPackageId, LfPartyId, config} import com.google.protobuf.field_mask.FieldMask import io.grpc.StatusRuntimeException import io.grpc.stub.StreamObserver @@ -118,10 +117,10 @@ trait BaseLedgerApiAdministration extends NoTracing { } .getOrElse(LedgerApiCommands.defaultApplicationId) - protected def domainOfTransaction(transactionId: String): DomainId protected def optionallyAwait[Tx]( tx: Tx, txId: String, + txDomainId: String, optTimeout: Option[config.NonNegativeDuration], ): Tx private def timeouts: ConsoleCommandTimeout = consoleEnvironment.commandTimeouts @@ -402,13 +401,6 @@ trait BaseLedgerApiAdministration extends NoTracing { ) ) }) - - @Help.Summary("Get the domain that a transaction was committed over.") - @Help.Description( - """Get the domain that a transaction was committed over. Throws an error if the transaction is not (yet) known - |to the participant or if the transaction has been pruned via `pruning.prune`.""" - ) - def domain_of(transactionId: String): DomainId = domainOfTransaction(transactionId) } @Help.Summary("Submit commands", FeatureFlag.Testing) @@ -462,7 +454,7 @@ trait BaseLedgerApiAdministration extends NoTracing { ) ) } - optionallyAwait(tx, tx.updateId, optTimeout) + optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout) } @Help.Summary( @@ -512,7 +504,7 @@ trait BaseLedgerApiAdministration extends NoTracing { ) ) } - optionallyAwait(tx, tx.updateId, optTimeout) + optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout) } @Help.Summary("Submit command asynchronously", FeatureFlag.Testing) @@ -554,6 +546,36 @@ trait BaseLedgerApiAdministration extends NoTracing { } } + @Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing) + @Help.Description( + """Find the status of commands. 
Note that only recent commands which are kept in memory will be returned.""" + ) + def status( + commandIdPrefix: String = "", + state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED, + limit: PositiveInt = PositiveInt.tryCreate(10), + ): Seq[CommandStatus] = check(FeatureFlag.Preview) { + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandInspectionService.GetCommandStatus( + commandIdPrefix = commandIdPrefix, + state = state, + limit = limit.unwrap, + ) + ) + } + } + + @Help.Summary("Investigate failed commands", FeatureFlag.Testing) + @Help.Description( + """Same as status(..., state = CommandState.Failed).""" + ) + def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[ + CommandStatus + ] = check(FeatureFlag.Preview) { + status(commandId, CommandState.COMMAND_STATE_FAILED, limit) + } + @Help.Summary( "Submit assign command and wait for the resulting reassignment, returning the reassignment or failing otherwise", FeatureFlag.Testing, @@ -796,6 +818,36 @@ trait BaseLedgerApiAdministration extends NoTracing { ) }) + @Help.Summary("Investigate successful and failed commands", FeatureFlag.Testing) + @Help.Description( + """Find the status of commands. Note that only recent commands which are kept in memory will be returned.""" + ) + def status( + commandIdPrefix: String = "", + state: CommandState = CommandState.COMMAND_STATE_UNSPECIFIED, + limit: PositiveInt = PositiveInt.tryCreate(10), + ): Seq[CommandStatus] = check(FeatureFlag.Preview) { + consoleEnvironment.run { + ledgerApiCommand( + LedgerApiCommands.CommandInspectionService.GetCommandStatus( + commandIdPrefix = commandIdPrefix, + state = state, + limit = limit.unwrap, + ) + ) + } + } + + @Help.Summary("Investigate failed commands", FeatureFlag.Testing) + @Help.Description( + """Same as status(..., state = CommandState.Failed).""" + ) + def failed(commandId: String = "", limit: PositiveInt = PositiveInt.tryCreate(10)): Seq[ + CommandStatus + ] = check(FeatureFlag.Preview) { + status(commandId, CommandState.COMMAND_STATE_FAILED, limit) + } + @Help.Summary("Read active contracts", FeatureFlag.Testing) @Help.Group("Active Contracts") object acs extends Helpful { @@ -1788,7 +1840,9 @@ trait BaseLedgerApiAdministration extends NoTracing { ) } javab.data.TransactionTree.fromProto( - TransactionTreeProto.toJavaProto(optionallyAwait(tx, tx.updateId, optTimeout)) + TransactionTreeProto.toJavaProto( + optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout) + ) ) } @@ -1841,7 +1895,7 @@ trait BaseLedgerApiAdministration extends NoTracing { ) } javab.data.Transaction.fromProto( - TransactionV2.toJavaProto(optionallyAwait(tx, tx.updateId, optTimeout)) + TransactionV2.toJavaProto(optionallyAwait(tx, tx.updateId, tx.domainId, optTimeout)) ) } @@ -2248,13 +2302,6 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration { implicit protected val consoleEnvironment: ConsoleEnvironment protected val name: String - override protected def domainOfTransaction(transactionId: String): DomainId = { - val txId = LedgerTransactionId.assertFromString(transactionId) - consoleEnvironment.run { - adminCommand(ParticipantAdminCommands.Inspection.LookupTransactionDomain(txId)) - } - } - import com.digitalasset.canton.util.ShowUtil.* private def awaitTransaction( @@ -2283,9 +2330,10 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration { } private[console] def involvedParticipants( - transactionId: String + transactionId: String, + txDomainId: String, ): 
Map[ParticipantReference, PartyId] = { - val txDomain = ledger_api.updates.domain_of(transactionId) + val txDomain = DomainId.tryFromString(txDomainId) // TODO(#6317) // There's a race condition here, in the unlikely circumstance that the party->participant mapping on the domain // changes during the command's execution. We'll have to live with it for the moment, as there's no convenient @@ -2348,12 +2396,13 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration { protected def optionallyAwait[Tx]( tx: Tx, txId: String, + txDomainId: String, optTimeout: Option[config.NonNegativeDuration], ): Tx = { optTimeout match { case None => tx case Some(timeout) => - val involved = involvedParticipants(txId) + val involved = involvedParticipants(txId, txDomainId) logger.debug(show"Awaiting transaction ${txId.unquoted} at ${involved.keys.mkShow()}") awaitTransaction(txId, involved, timeout) tx diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala index a460e9c41..813683349 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala @@ -96,6 +96,7 @@ import com.digitalasset.canton.util.* import com.digitalasset.canton.{DomainAlias, SequencerAlias, config} import java.time.Instant +import scala.annotation.nowarn import scala.concurrent.duration.Duration sealed trait DomainChoice @@ -430,6 +431,7 @@ class LocalParticipantTestingGroup( This is because the combined event log isn't guaranteed to have increasing timestamps. """ ) + @nowarn("msg=usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer") def event_search( domain: Option[DomainAlias] = None, from: Option[Instant] = None, @@ -461,6 +463,7 @@ class LocalParticipantTestingGroup( Note that if the domain is left blank, the values of `from` and `to` cannot be set. This is because the combined event log isn't guaranteed to have increasing timestamps. 
""") + @nowarn("msg=usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer") def transaction_search( domain: Option[DomainAlias] = None, from: Option[Instant] = None, diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala index be19e77cd..e0857864c 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/PartiesAdministration.scala @@ -309,7 +309,7 @@ class ParticipantPartiesAdministrationGroup( TopologyAdminCommands.Write.Propose( // TODO(#14048) properly set the serial or introduce auto-detection so we don't // have to set it on the client side - mapping = PartyToParticipant( + mapping = PartyToParticipant.create( partyId, None, threshold, @@ -326,6 +326,8 @@ class ParticipantPartiesAdministrationGroup( serial = None, store = AuthorizedStore.filterName, mustFullyAuthorize = mustFullyAuthorize, + change = TopologyChangeOp.Replace, + forceChanges = ForceFlags.none, ) ) } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala index 580ad648c..d08443cef 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministration.scala @@ -1209,6 +1209,29 @@ class TopologyAdministrationGroup( @Help.Group("Party to participant mappings") object party_to_participant_mappings extends Helpful { + private def findCurrent(party: PartyId, store: String) = { + TopologyStoreId(store) match { + case TopologyStoreId.DomainStore(domainId, _) => + expectAtMostOneResult( + list( + domainId, + filterParty = party.filterString, + // fetch both REPLACE and REMOVE to correctly determine the next serial + operation = None, + ) + ) + + case TopologyStoreId.AuthorizedStore => + expectAtMostOneResult( + list_from_authorized( + filterParty = party.filterString, + // fetch both REPLACE and REMOVE to correctly determine the next serial + operation = None, + ) + ) + } + } + @Help.Summary("Change party to participant mapping") @Help.Description("""Change the association of a party to hosting participants. party: The unique identifier of the party whose set of participants or permission to modify. 
@@ -1244,27 +1267,7 @@ class TopologyAdministrationGroup( store: String = AuthorizedStore.filterName, ): SignedTopologyTransaction[TopologyChangeOp, PartyToParticipant] = { - val currentO = TopologyStoreId(store) match { - case TopologyStoreId.DomainStore(domainId, _) => - expectAtMostOneResult( - list( - domainId, - filterParty = party.filterString, - // fetch both REPLACE and REMOVE to correctly determine the next serial - operation = None, - ) - ) - - case TopologyStoreId.AuthorizedStore => - expectAtMostOneResult( - list_from_authorized( - filterParty = party.filterString, - // fetch both REPLACE and REMOVE to correctly determine the next serial - operation = None, - ) - ) - } - + val currentO = findCurrent(party, store) val (existingPermissions, newSerial, threshold, groupAddressing) = currentO match { case Some(current) if current.context.operation == TopologyChangeOp.Remove => ( @@ -1361,7 +1364,7 @@ class TopologyAdministrationGroup( } val command = TopologyAdminCommands.Write.Propose( - mapping = PartyToParticipant( + mapping = PartyToParticipant.create( partyId = party, domainId = domainId, threshold = threshold, @@ -1373,6 +1376,7 @@ class TopologyAdministrationGroup( change = op, mustFullyAuthorize = mustFullyAuthorize, store = store, + forceChanges = ForceFlags.none, ) synchronisation.runAdminCommand(synchronize)(command) @@ -1969,13 +1973,16 @@ class TopologyAdministrationGroup( ), ): SignedTopologyTransaction[TopologyChangeOp, AuthorityOf] = { - val command = TopologyAdminCommands.Write.Propose( - AuthorityOf( + val authorityOf = AuthorityOf + .create( partyId, domainId, PositiveInt.tryCreate(threshold), parties, - ), + ) + .valueOr(error => consoleEnvironment.run(GenericCommandError(error))) + val command = TopologyAdminCommands.Write.Propose( + authorityOf, signedBy = signedBy.toList, serial = serial, store = store, diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala b/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala index c530e6bd9..409835ad1 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala @@ -82,6 +82,7 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing { histogramInventory = histogramInventory, histogramFilter = baseFilter, histogramConfigs = config.monitoring.metrics.histograms, + config.monitoring.metrics.cardinality.unwrap, loggerFactory, ) } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala index 2066c2b7f..638c3ec17 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala @@ -12,13 +12,14 @@ import com.daml.metrics.api.{MetricQualification, MetricsContext, MetricsInfoFil import com.daml.metrics.grpc.DamlGrpcServerMetrics import com.daml.metrics.{HealthMetrics, HistogramDefinition, MetricsFilterConfig} import com.digitalasset.canton.config.NonNegativeFiniteDuration -import com.digitalasset.canton.config.RequireTypes.Port +import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt} import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.domain.metrics.{MediatorMetrics, SequencerMetrics} import 
com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.MetricsConfig.JvmMetrics import com.digitalasset.canton.metrics.MetricsReporterConfig.{Csv, Logging, Prometheus} import com.digitalasset.canton.participant.metrics.ParticipantMetrics +import com.digitalasset.canton.telemetry.OpenTelemetryFactory import com.typesafe.scalalogging.LazyLogging import io.opentelemetry.api.OpenTelemetry import io.opentelemetry.api.metrics.Meter @@ -26,6 +27,7 @@ import io.opentelemetry.exporter.prometheus.PrometheusHttpServer import io.opentelemetry.instrumentation.runtimemetrics.java8.* import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder import io.opentelemetry.sdk.metrics.`export`.{MetricExporter, MetricReader, PeriodicMetricReader} +import io.opentelemetry.sdk.metrics.internal.state.MetricStorage import java.io.File import java.util.concurrent.ScheduledExecutorService @@ -43,6 +45,7 @@ final case class MetricsConfig( reporters: Seq[MetricsReporterConfig] = Seq.empty, jvmMetrics: Option[JvmMetrics] = None, histograms: Seq[HistogramDefinition] = Seq.empty, + cardinality: PositiveInt = PositiveInt.tryCreate(MetricStorage.DEFAULT_MAX_CARDINALITY), qualifiers: Seq[MetricQualification] = Seq[MetricQualification]( MetricQualification.Errors, MetricQualification.Latency, @@ -267,10 +270,15 @@ object MetricsRegistry extends LazyLogging { } .zip(config.reporters) - .foreach { case (reader, config) => - sdkMeterProviderBuilder - .registerMetricReader(FilteringMetricsReader.create(config.filters, reader)) + .foreach { case (reader, readerConfig) => + OpenTelemetryFactory + .registerMetricsReaderWithCardinality( + sdkMeterProviderBuilder, + FilteringMetricsReader.create(readerConfig.filters, reader), + config.cardinality.unwrap, + ) .discard + } sdkMeterProviderBuilder } diff --git a/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala b/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala index 9690dc8a7..abbb55aee 100644 --- a/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala +++ b/community/app/src/main/scala/com/digitalasset/canton/CantonAppDriver.scala @@ -39,7 +39,8 @@ abstract class CantonAppDriver[E <: Environment] extends App with NamedLogging w (Map( "Canton" -> BuildInfo.version, "Daml Libraries" -> BuildInfo.damlLibrariesVersion, - "Supported Canton protocol versions" -> BuildInfo.protocolVersions.toString(), + "Stable Canton protocol versions" -> BuildInfo.stableProtocolVersions.toString(), + "Beta Canton protocol versions" -> BuildInfo.betaProtocolVersions.toString(), ) ++ additionalVersions) foreach { case (name, version) => Console.out.println(s"$name: $version") } diff --git a/community/app/src/test/resources/documentation-snippets/beta-version-support.conf b/community/app/src/test/resources/documentation-snippets/beta-version-support.conf new file mode 100644 index 000000000..4d2d8018c --- /dev/null +++ b/community/app/src/test/resources/documentation-snippets/beta-version-support.conf @@ -0,0 +1,9 @@ +canton.parameters { + # turn on beta protocol version support for domain nodes + beta-version-support = yes +} + +canton.participants.participant1.parameters = { + # enable beta protocol version support on the participant (this allows the participant to connect to a domain using a beta protocol version) + beta-version-support = yes +} diff --git a/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala
b/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala index 8b69e131d..6cddcbd69 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/environment/NodesTest.scala @@ -97,6 +97,7 @@ class NodesTest extends FixtureAnyWordSpec with BaseTest with HasExecutionContex dbMigrateAndStart: Boolean = false, disableUpgradeValidation: Boolean = false, devVersionSupport: Boolean = false, + betaVersionSupport: Boolean = false, dontWarnOnDeprecatedPV: Boolean = false, initialProtocolVersion: ProtocolVersion = testedProtocolVersion, exitOnFatalFailures: Boolean = true, diff --git a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala index 632de3db6..39a82aa05 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala @@ -90,7 +90,7 @@ class CliIntegrationTest extends FixtureAnyWordSpec with BaseTest with SuiteMixi s"$cantonBin --version" ! processLogger checkOutput( processLogger, - shouldContain = Seq("Canton", "Daml Libraries", BuildInfo.protocolVersions.toString), + shouldContain = Seq("Canton", "Daml Libraries", BuildInfo.stableProtocolVersions.toString), ) } diff --git a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto index 273c58215..00178cb42 100644 --- a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto +++ b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto @@ -52,8 +52,10 @@ message TrafficConsumed { uint64 extra_traffic_consumed = 2; // Remaining free base traffic uint64 base_traffic_remainder = 3; + // Cost deducted at `sequencing_timestamp`; set to 0 when no traffic was consumed at `sequencing_timestamp` + uint64 last_consumed_cost = 4; // Timestamp at which this state is valid - this timestamp is used to compute the base traffic remainder above - int64 sequencing_timestamp = 4; // in microseconds of UTC time since Unix epoch + int64 sequencing_timestamp = 5; // in microseconds of UTC time since Unix epoch } // Message representing a traffic purchase made on behalf of a member @@ -77,10 +79,12 @@ message TrafficState { int64 extra_traffic_consumed = 2; // Amount of base traffic remaining int64 base_traffic_remainder = 3; + // Cost deducted at `timestamp`; set to 0 when no traffic was consumed at `timestamp` + uint64 last_consumed_cost = 4; // Timestamp at which the state is valid - int64 timestamp = 4; + int64 timestamp = 5; // Optional serial of the balance update that updated the extra traffic limit - google.protobuf.UInt32Value serial = 5; + google.protobuf.UInt32Value serial = 6; } message SetTrafficPurchasedMessage { diff --git a/community/base/src/main/scala/com/digitalasset/canton/data/Offset.scala b/community/base/src/main/scala/com/digitalasset/canton/data/Offset.scala index 84069d706..cb7732085 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/data/Offset.scala +++
b/community/base/src/main/scala/com/digitalasset/canton/data/Offset.scala @@ -5,9 +5,11 @@ package com.digitalasset.canton.data import com.daml.lf.data.{Bytes, Ref} import com.daml.logging.entries.{LoggingValue, ToLoggingValue} +import com.digitalasset.canton.data.Offset.beforeBegin import com.google.protobuf.ByteString import java.io.InputStream +import java.nio.{ByteBuffer, ByteOrder} /** Offsets into streams with hierarchical addressing. * @@ -31,10 +33,16 @@ final case class Offset(bytes: Bytes) extends Ordered[Offset] { def toByteArray: Array[Byte] = bytes.toByteArray def toHexString: Ref.HexString = bytes.toHexString + + def toLong: Long = + if (this == beforeBegin) 0L + else ByteBuffer.wrap(bytes.toByteArray).getLong(1) } object Offset { val beforeBegin: Offset = new Offset(Bytes.Empty) + private val longBasedByteLength: Int = 9 // One byte for the version plus 8 bytes for Long + private val versionUpstreamOffsetsAsLong: Byte = 0 def fromByteString(bytes: ByteString) = new Offset(Bytes.fromByteString(bytes)) @@ -44,6 +52,21 @@ object Offset { def fromHexString(s: Ref.HexString) = new Offset(Bytes.fromHexString(s)) + def fromLong(l: Long): Offset = + if (l == 0L) beforeBegin + else + Offset( + com.daml.lf.data.Bytes.fromByteString( + ByteString.copyFrom( + ByteBuffer + .allocate(longBasedByteLength) + .order(ByteOrder.BIG_ENDIAN) + .put(0, versionUpstreamOffsetsAsLong) + .putLong(1, l) + ) + ) + ) + implicit val `Offset to LoggingValue`: ToLoggingValue[Offset] = value => LoggingValue.OfString(value.toHexString) } diff --git a/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala b/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala index bff023e90..2b4976781 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/environment/CantonNodeParameters.scala @@ -56,12 +56,14 @@ object CantonNodeParameters { } trait Protocol { def devVersionSupport: Boolean + def betaVersionSupport: Boolean def dontWarnOnDeprecatedPV: Boolean } object Protocol { final case class Impl( devVersionSupport: Boolean, + betaVersionSupport: Boolean, dontWarnOnDeprecatedPV: Boolean, ) extends CantonNodeParameters.Protocol } @@ -94,5 +96,6 @@ trait HasProtocolCantonNodeParameters extends CantonNodeParameters.Protocol { protected def protocol: CantonNodeParameters.Protocol def devVersionSupport: Boolean = protocol.devVersionSupport + def betaVersionSupport: Boolean = protocol.betaVersionSupport def dontWarnOnDeprecatedPV: Boolean = protocol.dontWarnOnDeprecatedPV } diff --git a/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala b/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala index 045f089cb..51c12fc05 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala @@ -18,6 +18,7 @@ import com.digitalasset.canton.{ DoNotTraverseLikeFuture, } +import java.util.concurrent.CompletionException import scala.concurrent.{Awaitable, ExecutionContext, Future} import scala.util.chaining.* import scala.util.{Failure, Success, Try} @@ -81,8 +82,14 @@ object FutureUnlessShutdown { apply(f.transform({ case Success(value) => Success(UnlessShutdown.Outcome(value)) case Failure(AbortedDueToShutdownException(_)) => 
          Success(UnlessShutdown.AbortedDueToShutdown)
+       case Failure(ce: CompletionException) =>
+         ce.getCause match {
+           case AbortedDueToShutdownException(_) => Success(UnlessShutdown.AbortedDueToShutdown)
+           case _ => Failure(ce)
+         }
        case Failure(other) => Failure(other)
      }))
+  }

   /** Monad combination of `Future` and [[UnlessShutdown]]
diff --git a/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala b/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala
index 5e07a0446..e8273cdaf 100644
--- a/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala
+++ b/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala
@@ -293,13 +293,13 @@ object OnboardingRestriction {
    *                        Must be greater than `maxSequencingTime` specified by a participant,
    *                        practically also requires extra slack to allow clock skew between participant and sequencer.
    * @param onboardingRestriction current onboarding restrictions for participants
-   * @param catchUpParameters Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
-   *                          Defined starting with protobuf version v2 and protocol version v30.
-   *                          If None, the catch-up mode is disabled: the participant does not trigger the
-   *                          catch-up mode when lagging behind.
-   *                          If not None, it specifies the number of reconciliation intervals that the
-   *                          participant skips in catch-up mode, and the number of catch-up intervals
-   *                          intervals a participant should lag behind in order to enter catch-up mode.
+   * @param acsCommitmentsCatchUpConfig Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]].
+   *                                    Defined starting with protobuf version v2 and protocol version v30.
+   *                                    If None, the catch-up mode is disabled: the participant does not trigger the
+   *                                    catch-up mode when lagging behind.
+   *                                    If not None, it specifies the number of reconciliation intervals that the
+   *                                    participant skips in catch-up mode, and the number of catch-up
+   *                                    intervals a participant should lag behind in order to enter catch-up mode.
    *
    * @throws DynamicDomainParameters$.InvalidDynamicDomainParameters
    *    if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`.
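In the next file, the recipient computation stops returning every candidate recipient and instead picks a covering subset via SetCover.greedy, resolving the TODO(#13883) visible in the hunk that follows. For intuition, a minimal greedy set cover in the same spirit; this is an illustrative sketch only and assumes nothing about Canton's SetCover beyond the greedy "largest uncovered gain first" heuristic:

  object GreedyCoverSketch {
    // Pick sets until everything is covered or no set adds new elements.
    def greedyCover[R, P](sets: Map[R, Set[P]]): Seq[R] = {
      @annotation.tailrec
      def go(uncovered: Set[P], remaining: Map[R, Set[P]], acc: List[R]): List[R] =
        if (uncovered.isEmpty || remaining.isEmpty) acc.reverse
        else {
          // choose the recipient whose set covers the most still-uncovered elements
          val (best, covered) = remaining.maxBy { case (_, ps) => (ps & uncovered).size }
          if ((covered & uncovered).isEmpty) acc.reverse // nothing left to gain
          else go(uncovered -- covered, remaining - best, best :: acc)
        }
      go(sets.values.flatten.toSet, sets, Nil)
    }
  }

Applied to the sets built in rootHashRecipientsForInformees below, a single group address covering several informee participants is preferred over addressing each of those participants individually, e.g. greedyCover(Map("pop(alice)" -> Set("P1", "P2"), "P1" -> Set("P1"), "P2" -> Set("P2"))) returns only Seq("pop(alice)").

diff --git a/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala b/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala
index f722f7354..c1c884f55 100644
--- a/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala
+++ b/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/RootHashMessageRecipients.scala
@@ -12,12 +12,22 @@ import com.digitalasset.canton.topology.client.TopologySnapshot
 import com.digitalasset.canton.topology.{ParticipantId, PartyId}
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.ShowUtil.*
-import com.digitalasset.canton.util.{Checked, ErrorUtil}
+import com.digitalasset.canton.util.{Checked, ErrorUtil, SetCover}

 import scala.concurrent.{ExecutionContext, Future}

 object RootHashMessageRecipients extends HasLoggerName {

+  /** Computes the list of recipients for the root hash messages of a confirmation request.
+    * Each recipient returned is either a participant or a group address
+    * [[com.digitalasset.canton.sequencing.protocol.ParticipantsOfParty]]. 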
+ * The group addresses can be overlapping, but a participant member recipient will only be present if it is + * not included in any of the group addresses. + * + * @param informees informees of the confirmation request + * @param ipsSnapshot topology snapshot used at submission time + * @return list of root hash message recipients + */ def rootHashRecipientsForInformees( informees: Set[LfPartyId], ipsSnapshot: TopologySnapshot, @@ -37,10 +47,10 @@ object RootHashMessageRecipients extends HasLoggerName { ) ) ) - groupAddressedInformees <- ipsSnapshot.partiesWithGroupAddressing(informeesList) - participantsOfGroupAddressedInformees <- ipsSnapshot.activeParticipantsOfParties( - groupAddressedInformees.toList - ) + participantsOfGroupAddressedInformees <- ipsSnapshot + .activeParticipantsOfPartiesWithGroupAddressing( + informeesList + ) } yield { // If there are several group-addressed informees with overlapping participants, // we actually look for a set cover. It doesn't matter which one we pick. @@ -86,28 +96,45 @@ object RootHashMessageRecipients extends HasLoggerName { } ++ directlyAddressedParticipants.map { participant => MemberRecipient(participant) -> Set(participant) } - // TODO(#13883) Use a set cover for the recipients instead of all of them - // SetCover.greedy(sets.toMap) - sets.map { case (recipient, _) => recipient }.toSeq + SetCover.greedy(sets) } } + /** Validate the recipients of root hash messages received by a participant in Phase 3. + */ def validateRecipientsOnParticipant(recipients: Recipients): Checked[Nothing, String, Unit] = { - recipients.asSingleGroup match { - case Some(group) if group.sizeCompare(2) == 0 => - // group members must be participantId and mediator, due to previous checks - Checked.unit - case Some(group) => - val hasGroupAddressing = group.collect { case ParticipantsOfParty(party) => - party.toLf - }.nonEmpty - if (hasGroupAddressing) Checked.unit - else Checked.continue(s"The root hash message has an invalid recipient group.\n$recipients") - case _ => - Checked.continue(s"The root hash message has more than one recipient group.\n$recipients") + // group members must be of size 2, which must be participant and mediator, due to previous checks + val validGroups = recipients.trees.collect { + case RecipientsTree(group, Seq()) if group.sizeCompare(2) == 0 => group } + + if (validGroups.size == recipients.trees.size) { + val allUseGroupAddressing = validGroups.forall { + _.exists { + case ParticipantsOfParty(_) => true + case _ => false + } + } + + // Due to how rootHashRecipientsForInformees() computes recipients, if there is more than one group, + // they must all address the participant using group addressing. + if (allUseGroupAddressing || validGroups.sizeCompare(1) == 0) Checked.unit + else + Checked.continue( + s"The root hash message has more than one recipient group, not all using group addressing.\n$recipients" + ) + } else Checked.continue(s"The root hash message has invalid recipient groups.\n$recipients") } + /** Validate the recipients of root hash messages received by a mediator in Phase 2. 
+    *
+    * The recipients are valid if each recipient tree:
+    * - contains only a single recipient group (no children)
+    * - has a recipient group of size 2
+    * - has a recipient group that contains:
+    *   - the mediator group recipient
+    *   - either a participant member recipient or a ParticipantsOfParty group recipient
+    */
   def wrongAndCorrectRecipients(
       recipientsList: Seq[Recipients],
       mediator: MediatorGroupRecipient,
@@ -115,18 +142,14 @@
     val (wrongRecipients, correctRecipients) = recipientsList.flatMap { recipients =>
       recipients.trees.toList.map {
         case tree @ RecipientsTree(group, Seq()) =>
-          val participantCount = group.count {
-            case MemberRecipient(_: ParticipantId) => true
-            case _ => false
-          }
-          val groupAddressCount = group.count {
-            case ParticipantsOfParty(_) => true
+          val hasMediator = group.contains(mediator)
+          val hasParticipantOrPop = group.exists {
+            case MemberRecipient(_: ParticipantId) | ParticipantsOfParty(_) => true
             case _ => false
           }
-          val groupAddressingBeingUsed = groupAddressCount > 0
+
           Either.cond(
-            ((group.size == 2) || (groupAddressingBeingUsed && group.size >= 2)) &&
-              group.contains(mediator) && (participantCount + groupAddressCount > 0),
+            group.sizeCompare(2) == 0 && hasMediator && hasParticipantOrPop,
             group,
             tree,
           )
diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala
index 8dc4b6325..00a25d97c 100644
--- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala
+++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/transports/replay/ReplayingSendsSequencerClientTransport.scala
@@ -296,7 +296,8 @@ abstract class ReplayingSendsSequencerClientTransportCommon(

   private def scheduleCheck(): Unit = {
     performUnlessClosing(functionFullName) {
-      val nextCheckDuration = idlenessDuration.toJava.minus(elapsed(stateRef.get()))
+      val nextCheckDuration =
+        idlenessDuration.toJava.minus(durationFromLastEventToNow(stateRef.get()))
       val _ = materializer.scheduleOnce(nextCheckDuration.toScala, () => checkIfIdle())
     }.onShutdown(())
   }
@@ -315,25 +316,35 @@ abstract class ReplayingSendsSequencerClientTransportCommon(
   private def checkIfIdle(): Unit = {
     val stateSnapshot = stateRef.get()
-    val elapsedDuration = elapsed(stateSnapshot)
-    val isIdle = elapsedDuration.compareTo(idlenessDuration.toJava) >= 0
+    val lastEventTime = stateSnapshot.lastEventAt.getOrElse(stateSnapshot.startedAt).toInstant
+    val elapsedDuration =
+      java.time.Duration.between(stateSnapshot.startedAt.toInstant, lastEventTime)
+    val isIdle = durationFromLastEventToNow(stateSnapshot).compareTo(idlenessDuration.toJava) >= 0

     if (isIdle) {
-      idleP
-        .trySuccess(
-          EventsReceivedReport(
-            elapsedDuration.toScala,
-            totalEventsReceived = stateSnapshot.eventCounter,
-            finishedAtCounter = stateSnapshot.lastCounter,
-          )
-        )
-        .discard
+      if (pendingSends.sizeIs > 0) {
+        idleP
+          .tryFailure(
+            new IllegalStateException(s"There are ${pendingSends.size} pending send requests")
+          )
+          .discard
+      } else {
+        idleP
+          .trySuccess(
+            EventsReceivedReport(
+              elapsedDuration.toScala,
+              totalEventsReceived = stateSnapshot.eventCounter,
+              finishedAtCounter = stateSnapshot.lastCounter,
+            )
+          )
+          .discard
+      }
     } else {
       scheduleCheck() // schedule the next check
     }
   }

-  private 
def elapsed(stateSnapshot: State) = { + private def durationFromLastEventToNow(stateSnapshot: State) = { val from = stateSnapshot.lastEventAt.getOrElse(stateSnapshot.startedAt) java.time.Duration.between(from.toInstant, Instant.now()) } diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala index 6ec7083e3..6a491fa54 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/SubmissionRequest.scala @@ -59,15 +59,16 @@ final case class SubmissionRequest private ( @VisibleForTesting def isConfirmationRequest: Boolean = { - val hasParticipantRecipient = batch.allMembers.exists { - case _: ParticipantId => true - case _: Member => false + val hasParticipantOrPopRecipient = batch.allRecipients.exists { + case MemberRecipient(_: ParticipantId) => true + case ParticipantsOfParty(_) => true + case _ => false } val hasMediatorRecipient = batch.allRecipients.exists { case _: MediatorGroupRecipient => true case _: Recipient => false } - hasParticipantRecipient && hasMediatorRecipient + hasParticipantOrPopRecipient && hasMediatorRecipient } // Caches the serialized request to be able to do checks on its size without re-serializing diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala index b77c4f49b..102a9dd0e 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala @@ -25,31 +25,35 @@ final case class TrafficState( extraTrafficPurchased: NonNegativeLong, extraTrafficConsumed: NonNegativeLong, baseTrafficRemainder: NonNegativeLong, + lastConsumedCost: NonNegativeLong, timestamp: CantonTimestamp, serial: Option[PositiveInt], ) extends PrettyPrinting { def extraTrafficRemainder: Long = extraTrafficPurchased.value - extraTrafficConsumed.value - def availableTraffic: Long = extraTrafficRemainder + baseTrafficRemainder.value + // Need big decimal here because it could overflow a long especially if extraTrafficPurchased == Long.MAX + lazy val availableTraffic: BigDecimal = + BigDecimal(extraTrafficRemainder) + BigDecimal(baseTrafficRemainder.value) def toProtoV30: v30.TrafficState = v30.TrafficState( extraTrafficPurchased = extraTrafficPurchased.value, extraTrafficConsumed = extraTrafficConsumed.value, baseTrafficRemainder = baseTrafficRemainder.value, + lastConsumedCost = lastConsumedCost.value, timestamp = timestamp.toProtoPrimitive, serial = serial.map(_.value), ) - def toTrafficConsumed(member: Member): TrafficConsumed = TrafficConsumed( - member = member, - sequencingTimestamp = timestamp, - extraTrafficConsumed = extraTrafficConsumed, - baseTrafficRemainder = baseTrafficRemainder, - ) + def toTrafficConsumed(member: Member): TrafficConsumed = + TrafficConsumed( + member = member, + sequencingTimestamp = timestamp, + extraTrafficConsumed = extraTrafficConsumed, + baseTrafficRemainder = baseTrafficRemainder, + lastConsumedCost = lastConsumedCost, + ) - def toTrafficReceipt( - consumedCost: NonNegativeLong - ): TrafficReceipt = TrafficReceipt( - consumedCost = consumedCost, + def toTrafficReceipt: TrafficReceipt = TrafficReceipt( + 
consumedCost = lastConsumedCost, extraTrafficConsumed = extraTrafficConsumed, baseTrafficRemainder = baseTrafficRemainder, ) @@ -67,6 +71,7 @@ final case class TrafficState( param("extraTrafficLimit", _.extraTrafficPurchased), param("extraTrafficConsumed", _.extraTrafficConsumed), param("baseTrafficRemainder", _.baseTrafficRemainder), + param("lastConsumedCost", _.lastConsumedCost), param("timestamp", _.timestamp), paramIfDefined("serial", _.serial), ) @@ -78,13 +83,15 @@ object TrafficState { pp >> Some(v.extraTrafficPurchased.value) pp >> Some(v.extraTrafficConsumed.value) pp >> Some(v.baseTrafficRemainder.value) + pp >> Some(v.lastConsumedCost.value) pp >> v.timestamp pp >> v.serial.map(_.value) } implicit val getResultTrafficState: GetResult[Option[TrafficState]] = { GetResult - .createGetTuple5( + .createGetTuple6( + nonNegativeLongOptionGetResult, nonNegativeLongOptionGetResult, nonNegativeLongOptionGetResult, nonNegativeLongOptionGetResult, @@ -98,6 +105,7 @@ object TrafficState { NonNegativeLong.zero, NonNegativeLong.zero, NonNegativeLong.zero, + NonNegativeLong.zero, CantonTimestamp.Epoch, Option.empty, ) @@ -106,6 +114,7 @@ object TrafficState { NonNegativeLong.zero, NonNegativeLong.zero, NonNegativeLong.zero, + NonNegativeLong.zero, timestamp, Option.empty, ) @@ -116,12 +125,14 @@ object TrafficState { extraTrafficLimit <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficPurchased) extraTrafficConsumed <- ProtoConverter.parseNonNegativeLong(trafficStateP.extraTrafficConsumed) baseTrafficRemainder <- ProtoConverter.parseNonNegativeLong(trafficStateP.baseTrafficRemainder) + lastConsumedCost <- ProtoConverter.parseNonNegativeLong(trafficStateP.lastConsumedCost) timestamp <- CantonTimestamp.fromProtoPrimitive(trafficStateP.timestamp) serial <- trafficStateP.serial.traverse(ProtoConverter.parsePositiveInt) } yield TrafficState( extraTrafficLimit, extraTrafficConsumed, baseTrafficRemainder, + lastConsumedCost, timestamp, serial, ) diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala index 8e9666c11..77833d2d5 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumed.scala @@ -24,18 +24,18 @@ import slick.jdbc.GetResult * @param sequencingTimestamp sequencing timestamp at which this traffic consumed state is valid * @param extraTrafficConsumed extra traffic consumed at this sequencing timestamp * @param baseTrafficRemainder base traffic remaining at this sequencing timestamp + * @param lastConsumedCost last cost deducted from the traffic balance (base and if not enough, extra) */ final case class TrafficConsumed( member: Member, sequencingTimestamp: CantonTimestamp, extraTrafficConsumed: NonNegativeLong, baseTrafficRemainder: NonNegativeLong, + lastConsumedCost: NonNegativeLong, ) extends PrettyPrinting { - def toTrafficReceipt( - consumedCost: NonNegativeLong - ): TrafficReceipt = TrafficReceipt( - consumedCost = consumedCost, + def toTrafficReceipt: TrafficReceipt = TrafficReceipt( + consumedCost = lastConsumedCost, extraTrafficConsumed, baseTrafficRemainder, ) @@ -48,6 +48,7 @@ final case class TrafficConsumed( trafficPurchased.map(_.extraTrafficPurchased).getOrElse(NonNegativeLong.zero), extraTrafficConsumed, baseTrafficRemainder, + lastConsumedCost, trafficPurchased 
          .map(_.sequencingTimestamp.max(sequencingTimestamp))
          .getOrElse(sequencingTimestamp),
@@ -105,6 +106,7 @@
     copy(
       baseTrafficRemainder = baseTrafficRemainderAtCurrentTime,
       sequencingTimestamp = timestamp,
+      lastConsumedCost = NonNegativeLong.zero,
     )
   }
@@ -127,6 +129,7 @@
       baseTrafficRemainder = baseTrafficRemainderAfterConsume,
       extraTrafficConsumed = this.extraTrafficConsumed + extraTrafficConsumed,
       sequencingTimestamp = sequencingTimestamp,
+      lastConsumedCost = cost,
     )
   }
@@ -157,6 +160,7 @@
       param("member", _.member),
       param("extraTrafficConsumed", _.extraTrafficConsumed),
       param("baseTrafficRemainder", _.baseTrafficRemainder),
+      param("lastConsumedCost", _.lastConsumedCost),
       param("sequencingTimestamp", _.sequencingTimestamp),
     )
@@ -166,6 +170,7 @@
       extraTrafficConsumed = extraTrafficConsumed.value,
       baseTrafficRemainder = baseTrafficRemainder.value,
       sequencingTimestamp = sequencingTimestamp.toProtoPrimitive,
+      lastConsumedCost = lastConsumedCost.value,
     )
   }
 }
@@ -177,7 +182,13 @@ object TrafficConsumed {

   /** TrafficConsumed object for members the first time they submit a submission request
     */
   def init(member: Member): TrafficConsumed =
-    TrafficConsumed(member, CantonTimestamp.MinValue, NonNegativeLong.zero, NonNegativeLong.zero)
+    TrafficConsumed(
+      member,
+      CantonTimestamp.MinValue,
+      NonNegativeLong.zero,
+      NonNegativeLong.zero,
+      NonNegativeLong.zero,
+    )

   def empty(
       member: Member,
@@ -188,16 +199,18 @@
       timestamp,
       NonNegativeLong.zero,
       baseTraffic,
+      NonNegativeLong.zero,
     )

   implicit val trafficConsumedOrdering: Ordering[TrafficConsumed] =
     Ordering.by(_.sequencingTimestamp)

   implicit val trafficConsumedGetResult: GetResult[TrafficConsumed] =
-    GetResult.createGetTuple4[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong].andThen {
-      case (member, ts, trafficConsumed, baseTraffic) =>
-        TrafficConsumed(member, ts, trafficConsumed, baseTraffic)
-    }
+    GetResult
+      .createGetTuple5[Member, CantonTimestamp, NonNegativeLong, NonNegativeLong, NonNegativeLong]
+      .andThen { case (member, ts, trafficConsumed, baseTraffic, lastConsumedCost) =>
+        TrafficConsumed(member, ts, trafficConsumed, baseTraffic, lastConsumedCost)
+      }

   def fromProtoV30(trafficConsumedP: TrafficConsumedP): ParsingResult[TrafficConsumed] =
     for {
@@ -211,10 +224,14 @@
       sequencingTimestamp <- CantonTimestamp.fromProtoPrimitive(
         trafficConsumedP.sequencingTimestamp
       )
+      lastConsumedCost <- ProtoConverter.parseNonNegativeLong(
+        trafficConsumedP.lastConsumedCost
+      )
     } yield TrafficConsumed(
       member = member,
       extraTrafficConsumed = extraTrafficConsumed,
       baseTrafficRemainder = baseTrafficRemainder,
       sequencingTimestamp = sequencingTimestamp,
+      lastConsumedCost = lastConsumedCost,
     )
 }
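The lastConsumedCost bookkeeping threaded through above can be summarized as: base traffic is drawn down first, any overflow is charged as extra traffic consumed, and the full cost is remembered so that a receipt can later be derived from the state alone (cf. the new parameterless toTrafficReceipt). A simplified sketch with plain Longs instead of NonNegativeLong and hypothetical names; the real code additionally recomputes the base traffic remainder from elapsed time and the traffic control parameters:

  // Simplified model of the deduction order encoded by TrafficConsumed.consume.
  final case class ConsumedSketch(
      baseTrafficRemainder: Long,
      extraTrafficConsumed: Long,
      lastConsumedCost: Long,
  ) {
    def consume(cost: Long): ConsumedSketch = {
      val fromBase = math.min(baseTrafficRemainder, cost) // base traffic pays first
      ConsumedSketch(
        baseTrafficRemainder = baseTrafficRemainder - fromBase,
        extraTrafficConsumed = extraTrafficConsumed + (cost - fromBase), // overflow hits extra
        lastConsumedCost = cost, // what the receipt reads back
      )
    }
  }

diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumedManager.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumedManager.scala
index 1757072eb..a55f4a621 100644
--- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumedManager.scala
+++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/traffic/TrafficConsumedManager.scala
@@ -43,6 +43,7 @@ class TrafficConsumedManager(
           current.copy(
             extraTrafficConsumed = trafficReceipt.extraTrafficConsumed,
             baseTrafficRemainder = trafficReceipt.baseTrafficRemainder,
+            lastConsumedCost = 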
             trafficReceipt.consumedCost,
             sequencingTimestamp = timestamp,
           )
         case current => current
@@ -101,7 +102,7 @@
         }.discard
         Left(value)
       case Right(_) =>
-        val newState = trafficConsumed.updateAndGet {
+        val newState = updateAndGet {
           _.consume(timestamp, params, eventCost, logger)
         }
         logger.debug(s"Consumed ${eventCost.value} for $member at $timestamp: new state $newState")
diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala
index d35a6937c..3f1e81bb1 100644
--- a/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala
+++ b/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyManagerError.scala
@@ -435,23 +435,6 @@ object TopologyManagerError extends TopologyManagerErrorGroup {
         with TopologyManagerError
   }

-  @Explanation(
-    "This error indicates that a threshold in the submitted transaction was higher than the number of members that would have to satisfy that threshold."
-  )
-  @Resolution(
-    """Submit the topology transaction with a lower threshold.
-      |The metadata details of this error contain the expected maximum in the field ``expectedMaximum``."""
-  )
-  object InvalidThreshold
-      extends ErrorCode(id = "INVALID_THRESHOLD", ErrorCategory.InvalidIndependentOfSystemState) {
-    final case class ThresholdTooHigh(actual: Int, expectedMaximum: Int)(implicit
-        override val loggingContext: ErrorLoggingContext
-    ) extends CantonError.Impl(
-          cause = s"Threshold must not be higher than $expectedMaximum, but was $actual."
-        )
-        with TopologyManagerError
-  }
-
   @Explanation(
     "This error indicates that members referenced in a topology transaction have not declared at least one signing key or at least 1 encryption key or both."
   )
@@ -473,6 +456,20 @@
         with TopologyManagerError
   }

+  object PartyExceedsHostingLimit
+      extends ErrorCode(
+        id = "PARTY_EXCEEDS_HOSTING_LIMIT",
+        ErrorCategory.InvalidIndependentOfSystemState,
+      ) {
+    final case class Reject(party: PartyId, limit: Int, numParticipants: Int)(implicit
+        override val loggingContext: ErrorLoggingContext
+    ) extends CantonError.Impl(
+          cause =
+            s"Party $party exceeds the hosting limit of $limit with a desired number of $numParticipants hosting participants."
+        )
+        with TopologyManagerError
+  }
+
   @Explanation(
     "This error indicates that the topology transaction references members that are currently unknown." 
) @@ -572,7 +569,7 @@ object TopologyManagerError extends TopologyManagerErrorGroup { object InvalidTopologyMapping extends ErrorCode( id = "INVALID_TOPOLOGY_MAPPING", - ErrorCategory.InvalidGivenCurrentSystemStateOther, + ErrorCategory.InvalidIndependentOfSystemState, ) { final case class Reject( description: String @@ -605,7 +602,36 @@ object TopologyManagerError extends TopologyManagerErrorGroup { } ) with TopologyManagerError + + final case class MissingDomainParameters(effectiveTime: EffectiveTime)(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"Missing domain parameters at $effectiveTime" + ) + with TopologyManagerError } + + @Explanation( + """This error indicates that the namespace is already used by another entity.""" + ) + @Resolution( + """Change the namespace used in the submitted topology transaction.""" + ) + object NamespaceAlreadyInUse + extends ErrorCode( + id = "NAMESPACE_ALREADY_IN_USE", + ErrorCategory.InvalidGivenCurrentSystemStateResourceExists, + ) { + final case class Reject( + namespace: Namespace + )(implicit + override val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"The namespace $namespace is already in use by another entity." + ) + with TopologyManagerError + } + abstract class DomainErrorGroup extends ErrorGroup() abstract class ParticipantErrorGroup extends ErrorGroup() diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala index 19cc7e442..cdc8f7e7e 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/TopologyStateProcessor.scala @@ -167,6 +167,7 @@ class TopologyStateProcessor( s"${enqueuingOrStoring} topology transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} with ts=$effective (epsilon=${epsilon} ms)" ) case (ValidatedTopologyTransaction(tx, Some(r), _), idx) => + // TODO(i19737): we need to emit a security alert, if the rejection is due to a malicious broadcast logger.info( s"Rejected transaction ${idx + 1}/$ln ${tx.operation} ${tx.mapping} at ts=$effective (epsilon=${epsilon} ms) due to $r" ) @@ -296,18 +297,13 @@ class TopologyStateProcessor( authValidator .validateAndUpdateHeadAuthState( effective.value, - Seq(toValidate), - inStore.map(tx => tx.mapping.uniqueKey -> tx).toList.toMap, + toValidate, + inStore, expectFullAuthorization, ) ) - .subflatMap { case (_, txs) => - // TODO(#12390) proper error - txs.headOption - .toRight[TopologyTransactionRejection]( - TopologyTransactionRejection.Other("expected validation result doesn't exist") - ) - .flatMap(tx => tx.rejectionReason.toLeft(tx.transaction)) + .subflatMap { case (_, tx) => + tx.rejectionReason.toLeft(tx.transaction) } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala index 157b156e2..0a5ef93c1 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClient.scala @@ -299,6 +299,10 @@ trait PartyTopologySnapshotClient { parties: Seq[LfPartyId] )(implicit traceContext: TraceContext): Future[Set[LfPartyId]] 
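The addition just below gives callers the hosting participants of exactly those parties that have group addressing enabled, in a single topology lookup. A hypothetical usage sketch; only the new method is from this patch, the helper name and wiring are assumptions for illustration:

  import com.digitalasset.canton.LfPartyId
  import com.digitalasset.canton.topology.ParticipantId
  import com.digitalasset.canton.topology.client.PartyTopologySnapshotClient
  import com.digitalasset.canton.tracing.TraceContext

  import scala.concurrent.{ExecutionContext, Future}

  object GroupAddressingSketch {
    // Hypothetical helper: the union of participants reachable through a
    // ParticipantsOfParty group address for the given informees.
    def groupAddressedParticipants(
        snapshot: PartyTopologySnapshotClient,
        informees: Seq[LfPartyId],
    )(implicit ec: ExecutionContext, tc: TraceContext): Future[Set[ParticipantId]] =
      snapshot
        .activeParticipantsOfPartiesWithGroupAddressing(informees)
        .map(_.values.flatten.toSet) // keys are only the parties with group addressing
  }
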
+ def activeParticipantsOfPartiesWithGroupAddressing( + parties: Seq[LfPartyId] + )(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] + /** Returns a list of all known parties on this domain */ def inspectKnownParties( filterParty: String, @@ -841,6 +845,11 @@ private[client] trait PartyTopologySnapshotLoader ): Future[Set[LfPartyId]] = loadAndMapPartyInfos(parties, identity, _.groupAddressing).map(_.keySet) + final override def activeParticipantsOfPartiesWithGroupAddressing( + parties: Seq[LfPartyId] + )(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] = + loadAndMapPartyInfos(parties, _.participants.keySet, _.groupAddressing) + final override def consortiumThresholds( parties: Set[LfPartyId] )(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] = diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala index c17daa5d2..328b67e09 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/processing/AuthorizationGraph.scala @@ -8,12 +8,12 @@ import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.topology.Namespace import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction.AuthorizedNamespaceDelegation +import com.digitalasset.canton.topology.transaction.TopologyChangeOp.{Remove, Replace} import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.ShowUtil.* -import scala.annotation.tailrec import scala.collection.concurrent.TrieMap import scala.math.Ordering.Implicits.* @@ -35,8 +35,8 @@ object AuthorizedTopologyTransaction { /** Returns true if the namespace delegation is a root certificate * - * A root certificate is defined by the namespace delegation that authorizes the - * key f to act on namespace spanned by f, authorized by f. + * A root certificate is defined by a namespace delegation that authorizes the + * key f to act on the namespace spanned by f, authorized by f. */ def isRootCertificate(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = { NamespaceDelegation.isRootCertificate(namespaceDelegation.transaction) @@ -44,11 +44,7 @@ object AuthorizedTopologyTransaction { /** Returns true if the namespace delegation is a root certificate or a root delegation * - * A root certificate is defined by the namespace delegation that authorizes the - * key f to act on namespace spanned by f, authorized by f. - * - * A root delegation is defined by the namespace delegation the authorizes the - * key g to act on namespace spanned by f. + * A root delegation is a namespace delegation whose target key may be used to authorize other namespace delegations. 
*/ def isRootDelegation(namespaceDelegation: AuthorizedNamespaceDelegation): Boolean = { NamespaceDelegation.isRootDelegation(namespaceDelegation.transaction) @@ -56,49 +52,45 @@ object AuthorizedTopologyTransaction { } -/** maintain a dependency graph for the namespace delegations +/** Stores a set of namespace delegations, tracks dependencies and + * determines which keys are authorized to sign on behalf of a namespace. * - * namespace delegations are a bit tricky as there can be an arbitrary number of delegations before we reach - * the actual key that will be used for authorizations. think of it as a certificate chain where we get a + * Namespace delegations are a bit tricky as there can be an arbitrary number of delegations between the namespace key + * and the key that will be used for authorizations. Think of it as a certificate chain where we get a * series of certificates and we need to figure out a path from one certificate to the root certificate. * * NOTE: this class is not thread-safe * - * properties of the graph: - * - the nodes are the target key fingerprints - * - the node with fingerprint of the namespace is the root node - * - the edges between the nodes are the authorizations where key A authorizes key B to act on the namespace - * in this case, the authorization is outgoing from A and incoming to B. - * - the graph SHOULD be a directed acyclic graph, but we MIGHT have cycles (i.e. key A authorizing B, B authorizing A). - * we don't need to make a fuss about cycles in the graph. we just ignore / report them assuming it was an admin - * mistake, but we don't get confused. - * - root certificates are edges pointing to the node itself. they are separate such that they don't show up - * in the list of incoming / outgoing. - * - we track for each node the set of outgoing edges and incoming edges. an outgoing edge is a delegation where - * the source node is authorizing a target node. obviously every outgoing edge is also an incoming edge. + * Properties of the graph: + * - Each node corresponds to a target key + * - The node with key fingerprint of the namespace is the root node + * - The edges between nodes are namespace delegations. + * If key A signs a namespace delegation with target key B, then key A authorizes key B to act on the namespace. + * In this case, the edge is outgoing from node A and incoming into node B. + * - The graph may have cycles. The implementation does not get confused by this. * - * computation task: - * - once we've modified the graph, we compute the nodes that are somehow connected to the root node. + * Computation task: + * The graph maintains a set of nodes that are connected to the root node. Those correspond to the keys that are + * authorized to sign on behalf of the namespace. * - * purpose: - * - once we know which target keys are actually authorized to act on this particular namespace, we can then use - * this information to find out which resulting mapping is properly authorized and which one is not. + * Limitation: clients need to ensure that the namespace delegations added have valid signatures. + * If delegations with invalid signatures are added, authorization will break. * - * authorization checks: - * - when adding "single transactions", we do check that the transaction is properly authorized. otherwise we - * "ignore" it (returning false). this is used during processing. 
- * - when adding "batch transactions", we don't check that all of them are properly authorized, as we do allow - * temporarily "nodes" to be unauthorized (so that errors can be fixed by adding a replacement certificate) - * - when removing transactions, we do check that the authorizing key is authorized. but note that the authorizing - * key of an edge REMOVAL doesn't need to match the key used to authorized the ADD. + * @param extraDebugInfo whether to log the authorization graph at debug level on every recomputation */ class AuthorizationGraph( val namespace: Namespace, extraDebugInfo: Boolean, - val loggerFactory: NamedLoggerFactory, + override protected val loggerFactory: NamedLoggerFactory, ) extends AuthorizationCheck with NamedLogging { + /** @param root the last active root certificate for `target` + * @param outgoing all active namespace delegations (excluding root certificates) authorized by `target` + * @param incoming all active namespace delegations for the namespace `target` + * + * All namespace delegations are for namespace `this.namespace`. + */ private case class GraphNode( target: Fingerprint, root: Option[AuthorizedNamespaceDelegation] = None, @@ -113,9 +105,9 @@ class AuthorizationGraph( private abstract class AuthLevel(val isAuth: Boolean, val isRoot: Boolean) private object AuthLevel { - object NotAuthorized extends AuthLevel(false, false) - object Standard extends AuthLevel(true, false) - object RootDelegation extends AuthLevel(true, true) + private object NotAuthorized extends AuthLevel(false, false) + private object Standard extends AuthLevel(true, false) + private object RootDelegation extends AuthLevel(true, true) implicit val orderingAuthLevel: Ordering[AuthLevel] = Ordering.by[AuthLevel, Int](authl => Seq(authl.isAuth, authl.isRoot).count(identity)) @@ -129,23 +121,30 @@ class AuthorizationGraph( } + /** GraphNodes by GraphNode.target */ private val nodes = new TrieMap[Fingerprint, GraphNode]() - /** temporary cache for the current graph authorization check results - * - * if a fingerprint is empty, then we haven't yet computed the answer - */ + /** Authorized namespace delegations for namespace `this.namespace`, grouped by target */ private val cache = - new TrieMap[Fingerprint, Option[AuthorizedNamespaceDelegation]]() + new TrieMap[Fingerprint, AuthorizedNamespaceDelegation]() + /** Check if `item` is authorized and, if so, add its mapping to this graph. + * + * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE. + */ def add(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = { ErrorUtil.requireArgument( item.mapping.namespace == namespace, - s"added namespace ${item.mapping.namespace} to $namespace", + s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace", + ) + ErrorUtil.requireArgument( + item.operation == Replace, + s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace", ) + if ( AuthorizedTopologyTransaction.isRootCertificate(item) || - this.areValidAuthorizationKeys(item.signingKeys, requireRoot = true) + this.existsAuthorizedKeyIn(item.signingKeys, requireRoot = true) ) { doAdd(item) recompute() @@ -153,6 +152,12 @@ class AuthorizationGraph( } else false } + /** Add the mappings in `items` to this graph, regardless if they are authorized or not. + * If an unauthorized namespace delegation is added to the graph, the graph will contain nodes that are not connected to the root. 
+ * The target key of the unauthorized delegation will still be considered unauthorized. + * + * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REPLACE. + */ def unauthorizedAdd( items: Seq[AuthorizedNamespaceDelegation] )(implicit traceContext: TraceContext): Unit = { @@ -163,6 +168,15 @@ class AuthorizationGraph( private def doAdd( item: AuthorizedNamespaceDelegation )(implicit traceContext: TraceContext): Unit = { + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"unable to add namespace delegation for ${item.mapping.namespace} to graph for $namespace", + ) + ErrorUtil.requireArgument( + item.operation == Replace, + s"unable to add namespace delegation with operation ${item.operation} to graph for $namespace", + ) + val targetKey = item.mapping.target.fingerprint val curTarget = nodes.getOrElse(targetKey, GraphNode(targetKey)) // if this is a root certificate, remember it separately @@ -181,32 +195,38 @@ class AuthorizationGraph( } } - def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = - if (areValidAuthorizationKeys(item.signingKeys, requireRoot = true)) { + /** Check if `item` is authorized and, if so, remove its mapping from this graph. + * Note that addition and removal of a namespace delegation can be authorized by different keys. + * + * @throws java.lang.IllegalArgumentException if `item` does not refer to `namespace` or the operation is not REMOVE. + */ + def remove(item: AuthorizedNamespaceDelegation)(implicit traceContext: TraceContext): Boolean = { + ErrorUtil.requireArgument( + item.mapping.namespace == namespace, + s"unable to remove namespace delegation for ${item.mapping.namespace} from graph for $namespace", + ) + + ErrorUtil.requireArgument( + item.operation == Remove, + s"unable to remove namespace delegation with operation ${item.operation} from graph for $namespace", + ) + + if (existsAuthorizedKeyIn(item.signingKeys, requireRoot = true)) { doRemove(item) true } else false - - def unauthorizedRemove( - items: Seq[AuthorizedNamespaceDelegation] - )(implicit traceContext: TraceContext): Unit = { - items.foreach(doRemove) } /** remove a namespace delegation * - * note that this one is a bit tricky as the removal might have been authorized - * by a different key than the addition. this is fine but it complicates the book-keeping, + * The implementation is a bit tricky as the removal might have been authorized + * by a different key than the addition. This complicates the book-keeping, * as we need to track for each target key what the "incoming authorizations" were solely for the - * purpose of being able to clean them up + * purpose of being able to clean them up. 
*/ private def doRemove( item: AuthorizedNamespaceDelegation )(implicit traceContext: TraceContext): Unit = { - ErrorUtil.requireArgument( - item.mapping.namespace == namespace, - s"removing namespace ${item.mapping.namespace} from $namespace", - ) def myFilter(existing: AuthorizedNamespaceDelegation): Boolean = { // the auth key doesn't need to match on removals existing.mapping != item.mapping @@ -248,10 +268,9 @@ class AuthorizationGraph( updateRemove(targetKey, curTarget.copy(incoming = curTarget.incoming.filter(myFilter))) } recompute() - case None => - logger.warn(s"Superfluous removal of namespace delegation $item") - } + case None => logger.warn(s"Superfluous removal of namespace delegation $item") + } } protected def recompute()(implicit traceContext: TraceContext): Unit = { @@ -269,12 +288,12 @@ class AuthorizationGraph( fingerprint: Fingerprint, incoming: AuthorizedNamespaceDelegation, ): Unit = { - val current = cache.getOrElseUpdate(fingerprint, None) + val current = cache.get(fingerprint) val currentLevel = AuthLevel.fromDelegationO(current) val incomingLevel = AuthLevel.fromDelegationO(Some(incoming)) // this inherited level is higher than current, propagate it if (incomingLevel > currentLevel) { - cache.update(fingerprint, Some(incoming)) + cache.update(fingerprint, incoming) // get the graph node of this fingerprint nodes.get(fingerprint).foreach { graphNode => // iterate through all edges that depart from this node @@ -310,7 +329,7 @@ class AuthorizationGraph( } if (extraDebugInfo && logger.underlying.isDebugEnabled) { val str = - authorizedDelegations() + cache.values .map(aud => show"auth=${aud.signingKeys}, target=${aud.mapping.target.fingerprint}, root=${AuthorizedTopologyTransaction .isRootCertificate(aud)}" @@ -320,144 +339,99 @@ class AuthorizationGraph( } } else logger.debug( - s"Namespace ${namespace} has no root certificate, making all ${nodes.size} un-authorized" + s"Namespace $namespace has no root certificate, making all ${nodes.size} un-authorized" ) - override def areValidAuthorizationKeys( + override def existsAuthorizedKeyIn( authKeys: Set[Fingerprint], requireRoot: Boolean, - ): Boolean = { - authKeys.exists { authKey => - val authLevel = AuthLevel.fromDelegationO(cache.getOrElse(authKey, None)) - authLevel.isRoot || (authLevel.isAuth && !requireRoot) - } - } + ): Boolean = authKeys.exists(getAuthorizedKey(_, requireRoot).nonEmpty) - override def getValidAuthorizationKeys( - authKeys: Set[Fingerprint], + private def getAuthorizedKey( + authKey: Fingerprint, requireRoot: Boolean, - ): Set[SigningPublicKey] = authKeys.flatMap(authKey => + ): Option[SigningPublicKey] = cache - .getOrElse(authKey, None) + .get(authKey) + .filter { delegation => + val authLevel = AuthLevel.fromDelegationO(Some(delegation)) + authLevel.isRoot || (authLevel.isAuth && !requireRoot) + } .map(_.mapping.target) - .filter(_ => areValidAuthorizationKeys(Set(authKey), requireRoot)) - ) - def authorizationChain( - startAuthKey: Fingerprint, + override def keysSupportingAuthorization( + authKeys: Set[Fingerprint], requireRoot: Boolean, - ): Option[AuthorizationChain] = { - @tailrec - def go( - authKey: Fingerprint, - requireRoot: Boolean, - acc: List[AuthorizedNamespaceDelegation], - ): List[AuthorizedNamespaceDelegation] = { - cache.getOrElse(authKey, None) match { - // we've terminated with the root certificate - case Some(delegation) if AuthorizedTopologyTransaction.isRootCertificate(delegation) => - delegation :: acc - // cert is valid, append it - case Some(delegation) if 
delegation.mapping.isRootDelegation || !requireRoot => - go(delegation.signingKeys.head1, delegation.mapping.isRootDelegation, delegation :: acc) - // return empty to indicate failure - case _ => List.empty - } - } - go(startAuthKey, requireRoot, List.empty) match { - case Nil => None - case rest => - Some( - AuthorizationChain( - identifierDelegation = Seq.empty, - namespaceDelegations = rest, - Seq.empty, - ) - ) - } - } - - def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = - cache.values.flatMap(_.toList).toSeq + ): Set[SigningPublicKey] = authKeys.flatMap(getAuthorizedKey(_, requireRoot)) override def toString: String = s"AuthorizationGraph($namespace)" - - def debugInfo() = s"$namespace => ${nodes.mkString("\n")}" } trait AuthorizationCheck { - def areValidAuthorizationKeys(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean - def getValidAuthorizationKeys( + /** Determines if a subset of the given keys is authorized to sign on behalf of the (possibly decentralized) namespace. + * + * @param requireRoot whether the authorization must be suitable to authorize namespace delegations + */ + def existsAuthorizedKeyIn(authKeys: Set[Fingerprint], requireRoot: Boolean): Boolean + + /** Returns those keys that are useful for signing on behalf of the (possibly decentralized) namespace. + * Only keys with fingerprint in `authKeys` will be returned. + * The returned keys are not necessarily sufficient to authorize a transaction on behalf of the namespace; + * in case of a decentralized namespace, additional signatures may be required. + */ + def keysSupportingAuthorization( authKeys: Set[Fingerprint], requireRoot: Boolean, ): Set[SigningPublicKey] - - def authorizationChain( - startAuthKey: Fingerprint, - requireRoot: Boolean, - ): Option[AuthorizationChain] - - def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] } object AuthorizationCheck { - val empty = new AuthorizationCheck { - override def areValidAuthorizationKeys( + val empty: AuthorizationCheck = new AuthorizationCheck { + override def existsAuthorizedKeyIn( authKeys: Set[Fingerprint], requireRoot: Boolean, ): Boolean = false - override def authorizationChain( - startAuthKey: Fingerprint, - requireRoot: Boolean, - ): Option[AuthorizationChain] = None - - override def getValidAuthorizationKeys( + override def keysSupportingAuthorization( authKeys: Set[Fingerprint], requireRoot: Boolean, ): Set[SigningPublicKey] = Set.empty - override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = Seq.empty - override def toString: String = "AuthorizationCheck.empty" } } +/** Authorization graph for a decentralized namespace. + * + * @throws java.lang.IllegalArgumentException if `dnd` and `direct` refer to different namespaces. 
+ */ final case class DecentralizedNamespaceAuthorizationGraph( dnd: DecentralizedNamespaceDefinition, direct: AuthorizationGraph, ownerGraphs: Seq[AuthorizationGraph], ) extends AuthorizationCheck { - override def areValidAuthorizationKeys( + require( + dnd.namespace == direct.namespace, + s"The direct graph refers to the wrong namespace (expected: ${dnd.namespace}, actual: ${direct.namespace}).", + ) + + override def existsAuthorizedKeyIn( authKeys: Set[Fingerprint], requireRoot: Boolean, ): Boolean = { - val viaNamespaceDelegation = direct.areValidAuthorizationKeys(authKeys, requireRoot) + val viaNamespaceDelegation = direct.existsAuthorizedKeyIn(authKeys, requireRoot) val viaCollective = - ownerGraphs.count(_.areValidAuthorizationKeys(authKeys, requireRoot)) >= dnd.threshold.value + ownerGraphs.count(_.existsAuthorizedKeyIn(authKeys, requireRoot)) >= dnd.threshold.value viaNamespaceDelegation || viaCollective } - import cats.syntax.foldable.* - - override def getValidAuthorizationKeys( + override def keysSupportingAuthorization( authKeys: Set[Fingerprint], requireRoot: Boolean, ): Set[SigningPublicKey] = { (direct +: ownerGraphs) - .flatMap(_.getValidAuthorizationKeys(authKeys, requireRoot)) + .flatMap(_.keysSupportingAuthorization(authKeys, requireRoot)) .toSet } - - override def authorizationChain( - startAuthKey: Fingerprint, - requireRoot: Boolean, - ): Option[AuthorizationChain] = - direct - .authorizationChain(startAuthKey, requireRoot) - .orElse(ownerGraphs.map(_.authorizationChain(startAuthKey, requireRoot)).combineAll) - - override def authorizedDelegations(): Seq[AuthorizedNamespaceDelegation] = - direct.authorizedDelegations() ++ ownerGraphs.flatMap(_.authorizedDelegations()) } diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala index e509b2ac7..5f0d7a9ed 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidator.scala @@ -5,7 +5,8 @@ package com.digitalasset.canton.topology.processing import cats.Monoid import cats.data.EitherT -import cats.syntax.parallel.* +import cats.syntax.bifunctor.* +import cats.syntax.foldable.* import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.CryptoPureApi import com.digitalasset.canton.data.CantonTimestamp @@ -20,14 +21,10 @@ import com.digitalasset.canton.topology.processing.AuthorizedTopologyTransaction import com.digitalasset.canton.topology.store.ValidatedTopologyTransaction.GenericValidatedTopologyTransaction import com.digitalasset.canton.topology.store.* import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction -import com.digitalasset.canton.topology.transaction.TopologyMapping.{ - MappingHash, - RequiredAuthAuthorizations, -} +import com.digitalasset.canton.topology.transaction.TopologyMapping.RequiredAuthAuthorizations import com.digitalasset.canton.topology.transaction.TopologyTransaction.GenericTopologyTransaction import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.FutureInstances.* import 
scala.concurrent.{ExecutionContext, Future} @@ -130,17 +127,14 @@ class IncomingTopologyTransactionAuthorizationValidator( */ def validateAndUpdateHeadAuthState( timestamp: CantonTimestamp, - transactionsToValidate: Seq[GenericSignedTopologyTransaction], - transactionsInStore: Map[MappingHash, GenericSignedTopologyTransaction], + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericSignedTopologyTransaction], expectFullAuthorization: Boolean, )(implicit traceContext: TraceContext - ): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = { + ): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = { for { - authCheckResult <- determineRelevantUidsAndNamespaces( - transactionsToValidate, - transactionsInStore.view.mapValues(_.transaction).toMap, - ) + authCheckResult <- determineRelevantUidsAndNamespaces(toValidate, inStore.map(_.transaction)) (updateAggregation, targetDomainVerified) = authCheckResult loadGraphsF = loadAuthorizationGraphs(timestamp, updateAggregation.authNamespaces) loadUidsF = loadIdentifierDelegationsCascading( @@ -153,11 +147,11 @@ class IncomingTopologyTransactionAuthorizationValidator( } yield { logger.debug(s"Update aggregation yielded ${updateAggregation}") - val validated = targetDomainVerified.map { + val validated = targetDomainVerified match { case ValidatedTopologyTransaction(tx, None, _) => processTransaction( tx, - transactionsInStore.get(tx.mapping.uniqueKey), + inStore, expectFullAuthorization, ) case v => v @@ -173,101 +167,124 @@ class IncomingTopologyTransactionAuthorizationValidator( } } + /** Validates a topology transaction as follows: + *
+    *   1. check that the transaction has valid signatures and is sufficiently authorized. if not, reject.
+    *   2. if there are no missing authorizers, as is the case for proposals, we update internal caches for NSD, IDD, and DND.
+    *   3. if this validation is run to determine a final verdict, as is the case for processing topology transactions coming from the domain,
+    *      automatically clear the proposal flag for transactions with sufficient authorizing signatures.
+ */ private def processTransaction( toValidate: GenericSignedTopologyTransaction, inStore: Option[GenericSignedTopologyTransaction], expectFullAuthorization: Boolean, )(implicit traceContext: TraceContext): GenericValidatedTopologyTransaction = { - val processedNs = toValidate.selectMapping[NamespaceDelegation].forall { sigTx => - processNamespaceDelegation( - toValidate.operation, - AuthorizedTopologyTransaction(sigTx), - ) - } + // See validateRootCertificate why we need to check the removal of a root certificate explicitly here. + val signatureCheckResult = validateRootCertificate(toValidate) + .getOrElse(validateSignaturesAndDetermineMissingAuthorizers(toValidate, inStore)) - val processedIdent = toValidate.selectMapping[IdentifierDelegation].forall { sigTx => - processIdentifierDelegation( - toValidate.operation, - AuthorizedTopologyTransaction(sigTx), - ) - } - - val resultDns = toValidate.selectMapping[DecentralizedNamespaceDefinition].map { sigTx => - processDecentralizedNamespaceDefinition( - sigTx.operation, - AuthorizedTopologyTransaction(sigTx), - ) - } - val processedDns = resultDns.forall(_._1) - val mappingSpecificCheck = processedNs && processedIdent && processedDns - - // the transaction is fully authorized if either - // 1. it's a root certificate, or - // 2. there is no authorization error and there are no missing authorizers - // We need to check explicitly for the root certificate here, because a REMOVE operation - // removes itself from the authorization graph, and therefore `isCurrentlyAuthorized` cannot validate it. - val authorizationResult = - if (NamespaceDelegation.isRootCertificate(toValidate)) - Right( - ( - toValidate, - RequiredAuthAuthorizations.empty, // no missing authorizers - ) - ) - else isCurrentlyAuthorized(toValidate, inStore) - - authorizationResult match { + signatureCheckResult match { // propagate the rejection reason case Left(rejectionReason) => ValidatedTopologyTransaction(toValidate, Some(rejectionReason)) // if a transaction wasn't outright rejected, run some additional checks case Right((validatedTx, missingAuthorizers)) => - // The mappingSpecificCheck is a necessary condition for having sufficient authorizers. - val isFullyAuthorized = - mappingSpecificCheck && missingAuthorizers.isEmpty - - // If a decentralizedNamespace transaction is fully authorized, reflect so in the decentralizedNamespace cache. - // Note: It seems a bit unsafe to update the caches on the assumption that the update will also be eventually - // persisted by the caller (a few levels up the call chain in TopologyStateProcessor.validateAndApplyAuthorization - // as the caller performs additional checks such as the numeric value of the serial number). - // But at least this is safer than where the check was previously (inside processDecentralizedNamespaceDefinition before even - // `isCurrentlyAuthorized` above had finished all checks). - if (isFullyAuthorized) { - resultDns.foreach { case (_, updateDecentralizedNamespaceCache) => - updateDecentralizedNamespaceCache() - } - } + handleSuccessfulSignatureChecks( + validatedTx, + missingAuthorizers, + expectFullAuthorization, + ) + } + } - val acceptMissingAuthorizers = - validatedTx.isProposal && !expectFullAuthorization - - // if the result of this validation is final (when processing transactions for the authorized store - // or sequenced transactions from the domain) we set the proposal flag according to whether the transaction - // is fully authorized or not. 
- // This must not be done when preliminarily validating transactions via the DomainTopologyManager, because - // the validation outcome might change when validating the transaction again after it has been sequenced. - val finalTransaction = - if (validationIsFinal) validatedTx.copy(isProposal = !isFullyAuthorized) - else validatedTx - - // Either the transaction is fully authorized or the request allows partial authorization - if (isFullyAuthorized || acceptMissingAuthorizers) { - ValidatedTopologyTransaction(finalTransaction, None) - } else { - if (!missingAuthorizers.isEmpty) { - logger.debug(s"Missing authorizers: $missingAuthorizers") - } - if (!mappingSpecificCheck) { - logger.debug(s"Mapping specific check failed") - } - ValidatedTopologyTransaction( - toValidate, - Some(TopologyTransactionRejection.NotAuthorized), - ) + private def handleSuccessfulSignatureChecks( + toValidate: GenericSignedTopologyTransaction, + missingAuthorizers: RequiredAuthAuthorizations, + expectFullAuthorization: Boolean, + )(implicit + traceContext: TraceContext + ): ValidatedTopologyTransaction[TopologyChangeOp, TopologyMapping] = { + // if there are no missing authorizers, we can update the internal caches + val isFullyAuthorized = if (missingAuthorizers.isEmpty) { + val processedNSD = toValidate + .selectMapping[NamespaceDelegation] + .forall { sigTx => processNamespaceDelegation(AuthorizedTopologyTransaction(sigTx)) } + + val processedIDD = toValidate.selectMapping[IdentifierDelegation].forall { sigTx => + processIdentifierDelegation(AuthorizedTopologyTransaction(sigTx)) + } + + val processedDND = + toValidate.selectMapping[DecentralizedNamespaceDefinition].forall { sigTx => + processDecentralizedNamespaceDefinition(AuthorizedTopologyTransaction(sigTx)) } + val mappingSpecificCheck = processedNSD && processedIDD && processedDND + if (!mappingSpecificCheck) { + logger.debug(s"Mapping specific check failed") + } + mappingSpecificCheck + } else { false } + + val acceptMissingAuthorizers = + toValidate.isProposal && !expectFullAuthorization + + // if the result of this validation is final (when processing transactions for the authorized store + // or sequenced transactions from the domain) we set the proposal flag according to whether the transaction + // is fully authorized or not. + // This must not be done when preliminarily validating transactions via the DomainTopologyManager, because + // the validation outcome might change when validating the transaction again after it has been sequenced. + val finalTransaction = + if (validationIsFinal) toValidate.copy(isProposal = !isFullyAuthorized) + else toValidate + + // Either the transaction is fully authorized or the request allows partial authorization + if (isFullyAuthorized || acceptMissingAuthorizers) { + ValidatedTopologyTransaction(finalTransaction, None) + } else { + if (!missingAuthorizers.isEmpty) { + logger.debug(s"Missing authorizers: $missingAuthorizers") + } + ValidatedTopologyTransaction( + toValidate, + Some(TopologyTransactionRejection.NotAuthorized), + ) } } + /** Validates the signature of the removal of a root certificate. + * This check is done separately from the mechanism used for other topology transactions (ie isCurrentlyAuthorized), + * because removing a root certificate removes it from the authorization graph and therefore + * isCurrentlyAuthorized would not find the key to validate it. 
+ */ + private def validateRootCertificate( + toValidate: GenericSignedTopologyTransaction + ): Option[Either[ + TopologyTransactionRejection, + (GenericSignedTopologyTransaction, RequiredAuthAuthorizations), + ]] = { + toValidate + .selectMapping[NamespaceDelegation] + .filter(NamespaceDelegation.isRootCertificate) + .map { rootCert => + val result = rootCert.signatures.toSeq.forgetNE + .traverse_( + pureCrypto + .verifySignature( + rootCert.hash.hash, + rootCert.mapping.target, + _, + ) + ) + .bimap( + TopologyTransactionRejection.SignatureCheckFailed, + _ => (toValidate, RequiredAuthAuthorizations.empty /* no missing authorizers */ ), + ) + result + } + + } + /** loads all identifier delegations into the identifier delegation cache * * This function has two "modes". On a cascading update affecting namespaces, we have @@ -291,16 +308,15 @@ class IncomingTopologyTransactionAuthorizationValidator( } private def processIdentifierDelegation( - op: TopologyChangeOp, - tx: AuthorizedIdentifierDelegation, + tx: AuthorizedIdentifierDelegation ): Boolean = { // check authorization val check = getAuthorizationCheckForNamespace(tx.mapping.identifier.namespace) - val keysAreValid = check.areValidAuthorizationKeys(tx.signingKeys, requireRoot = false) + val keysAreValid = check.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false) // update identifier delegation cache if necessary if (keysAreValid) { val updateOp: Set[AuthorizedIdentifierDelegation] => Set[AuthorizedIdentifierDelegation] = - op match { + tx.operation match { case TopologyChangeOp.Replace => x => x + tx case TopologyChangeOp.Remove => @@ -313,12 +329,11 @@ class IncomingTopologyTransactionAuthorizationValidator( } private def processNamespaceDelegation( - op: TopologyChangeOp, - tx: AuthorizedNamespaceDelegation, + tx: AuthorizedNamespaceDelegation )(implicit traceContext: TraceContext): Boolean = { val graph = getAuthorizationGraphForNamespace(tx.mapping.namespace) // add or remove including authorization check - op match { + tx.operation match { case TopologyChangeOp.Replace => graph.add(tx) case TopologyChangeOp.Remove => graph.remove(tx) } @@ -330,9 +345,8 @@ class IncomingTopologyTransactionAuthorizationValidator( * by the caller once the mapping is to be committed. 
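The `traverse_`/`bimap` combination above verifies each signature in turn and aborts on the first failure. A self-contained sketch of the same pattern, where `verify` stands in for `pureCrypto.verifySignature`:

```scala
import cats.implicits.*

// Stand-in for the signature check: a Left short-circuits traverse_.
def verify(sig: String): Either[String, Unit] =
  Either.cond(sig.startsWith("valid"), (), s"signature check failed: $sig")

val signatures = List("valid-1", "valid-2")
val outcome: Either[String, Unit] = signatures.traverse_(verify)
// Both sides of the aggregate result can then be mapped, as done above:
val mapped: Either[List[String], String] =
  outcome.bimap(err => List(err), _ => "all signatures verified")
```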
*/ private def processDecentralizedNamespaceDefinition( - op: TopologyChangeOp, - tx: AuthorizedDecentralizedNamespaceDefinition, - )(implicit traceContext: TraceContext): (Boolean, () => Unit) = { + tx: AuthorizedDecentralizedNamespaceDefinition + )(implicit traceContext: TraceContext): Boolean = { val decentralizedNamespace = tx.mapping.namespace val dnsGraph = decentralizedNamespaceCache .get(decentralizedNamespace) @@ -360,26 +374,30 @@ class IncomingTopologyTransactionAuthorizationValidator( ) newDecentralizedNamespaceGraph } - val isAuthorized = dnsGraph.areValidAuthorizationKeys(tx.signingKeys, false) - - ( - isAuthorized, - () => { - val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace) - decentralizedNamespaceCache - .put( - decentralizedNamespace, - (tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)), - ) - .discard - }, - ) + val isAuthorized = dnsGraph.existsAuthorizedKeyIn(tx.signingKeys, requireRoot = false) + + if (isAuthorized) { + tx.operation match { + case TopologyChangeOp.Remove => + decentralizedNamespaceCache.remove(decentralizedNamespace).discard + + case TopologyChangeOp.Replace => + val ownerGraphs = tx.mapping.owners.forgetNE.toSeq.map(getAuthorizationGraphForNamespace) + decentralizedNamespaceCache + .put( + decentralizedNamespace, + (tx.mapping, dnsGraph.copy(dnd = tx.mapping, ownerGraphs = ownerGraphs)), + ) + .discard + } + } + isAuthorized } private def determineRelevantUidsAndNamespaces( - transactionsToValidate: Seq[GenericSignedTopologyTransaction], - transactionsInStore: Map[MappingHash, GenericTopologyTransaction], - ): Future[(UpdateAggregation, Seq[GenericValidatedTopologyTransaction])] = { + toValidate: GenericSignedTopologyTransaction, + inStore: Option[GenericTopologyTransaction], + ): Future[(UpdateAggregation, GenericValidatedTopologyTransaction)] = { def verifyDomain( tx: GenericSignedTopologyTransaction ): Either[TopologyTransactionRejection, Unit] = @@ -395,22 +413,19 @@ class IncomingTopologyTransactionAuthorizationValidator( // we need to figure out for which namespaces and uids we need to load the validation checks // and for which uids and namespaces we'll have to perform a cascading update - import UpdateAggregation.monoid - transactionsToValidate.parFoldMapA { toValidate => - EitherT - .fromEither[Future](verifyDomain(toValidate)) - .fold( - rejection => - (UpdateAggregation(), Seq(ValidatedTopologyTransaction(toValidate, Some(rejection)))), - _ => - ( - UpdateAggregation().add( - toValidate.mapping, - transactionsInStore.get(toValidate.mapping.uniqueKey), - ), - Seq(ValidatedTopologyTransaction(toValidate, None)), + EitherT + .fromEither[Future](verifyDomain(toValidate)) + .fold( + rejection => + (UpdateAggregation(), ValidatedTopologyTransaction(toValidate, Some(rejection))), + _ => + ( + UpdateAggregation().add( + toValidate.mapping, + inStore, ), - ) - } + ValidatedTopologyTransaction(toValidate, None), + ), + ) } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala index af8fd3bf9..0363df941 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/processing/TransactionAuthorizationValidator.scala @@ -43,7 +43,7 @@ trait 
TransactionAuthorizationValidator { protected def pureCrypto: CryptoPureApi - def isCurrentlyAuthorized( + def validateSignaturesAndDetermineMissingAuthorizers( toValidate: GenericSignedTopologyTransaction, inStore: Option[GenericSignedTopologyTransaction], )(implicit @@ -72,41 +72,41 @@ trait TransactionAuthorizationValidator { val namespaceWithRootAuthorizations = required.namespacesWithRoot.map { ns => val check = getAuthorizationCheckForNamespace(ns) - val keysWithDelegation = check.getValidAuthorizationKeys( + val keysUsed = check.keysSupportingAuthorization( signingKeys, requireRoot = true, ) val keysAuthorizeNamespace = - check.areValidAuthorizationKeys(signingKeys, requireRoot = true) - (ns -> (keysAuthorizeNamespace, keysWithDelegation)) + check.existsAuthorizedKeyIn(signingKeys, requireRoot = true) + (ns -> (keysAuthorizeNamespace, keysUsed)) }.toMap // Now let's determine which namespaces and uids actually delegated to any of the keys val namespaceAuthorizations = required.namespaces.map { ns => val check = getAuthorizationCheckForNamespace(ns) - val keysWithDelegation = check.getValidAuthorizationKeys( + val keysUsed = check.keysSupportingAuthorization( signingKeys, requireRoot = false, ) - val keysAuthorizeNamespace = check.areValidAuthorizationKeys(signingKeys, requireRoot = false) - (ns -> (keysAuthorizeNamespace, keysWithDelegation)) + val keysAuthorizeNamespace = check.existsAuthorizedKeyIn(signingKeys, requireRoot = false) + (ns -> (keysAuthorizeNamespace, keysUsed)) }.toMap val uidAuthorizations = required.uids.map { uid => val check = getAuthorizationCheckForNamespace(uid.namespace) - val keysWithDelegation = check.getValidAuthorizationKeys( + val keysUsed = check.keysSupportingAuthorization( signingKeys, requireRoot = false, ) val keysAuthorizeNamespace = - check.areValidAuthorizationKeys(signingKeys, requireRoot = false) + check.existsAuthorizedKeyIn(signingKeys, requireRoot = false) val keyForUid = getAuthorizedIdentifierDelegation(check, uid, toValidate.signatures.map(_.signedBy)) .map(_.mapping.target) - (uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysWithDelegation ++ keyForUid)) + (uid -> (keysAuthorizeNamespace || keyForUid.nonEmpty, keysUsed ++ keyForUid)) }.toMap val extraKeyAuthorizations = { @@ -132,7 +132,7 @@ trait TransactionAuthorizationValidator { .toMap } - val allAuthorizingKeys = + val allKeysUsedForAuthorization = (namespaceWithRootAuthorizations.values ++ namespaceAuthorizations.values ++ uidAuthorizations.values ++ @@ -145,9 +145,9 @@ trait TransactionAuthorizationValidator { logAuthorizations("Authorizations for UIDs", uidAuthorizations) logAuthorizations("Authorizations for extraKeys", extraKeyAuthorizations) - logger.debug(s"All authorizing keys: ${allAuthorizingKeys.keySet}") + logger.debug(s"All keys used for authorization: ${allKeysUsedForAuthorization.keySet}") - val superfluousKeys = signingKeys -- allAuthorizingKeys.keys + val superfluousKeys = signingKeys -- allKeysUsedForAuthorization.keys for { _ <- Either.cond[TopologyTransactionRejection, Unit]( // there must be at least 1 key used for the signatures for one of the delegation mechanisms @@ -160,7 +160,7 @@ trait TransactionAuthorizationValidator { }, ) - txWithValidSignatures <- toValidate + txWithSignaturesToVerify <- toValidate .removeSignatures(superfluousKeys) .toRight({ logger.info( @@ -169,9 +169,9 @@ trait TransactionAuthorizationValidator { TopologyTransactionRejection.NoDelegationFoundForKeys(superfluousKeys) }) - _ <- 
txWithValidSignatures.signatures.forgetNE.toList + _ <- txWithSignaturesToVerify.signatures.forgetNE.toList .traverse_(sig => - allAuthorizingKeys + allKeysUsedForAuthorization .get(sig.signedBy) .toRight({ val msg = @@ -182,7 +182,7 @@ trait TransactionAuthorizationValidator { .flatMap(key => pureCrypto .verifySignature( - txWithValidSignatures.hash.hash, + txWithSignaturesToVerify.hash.hash, key, sig, ) @@ -202,7 +202,7 @@ trait TransactionAuthorizationValidator { extraKeys = onlyFullyAuthorized(extraKeyAuthorizations), ) ( - txWithValidSignatures, + txWithSignaturesToVerify, requiredAuth .satisfiedByActualAuthorizers(actual) .fold(identity, _ => RequiredAuthAuthorizations.empty), @@ -236,7 +236,7 @@ trait TransactionAuthorizationValidator { ): Option[AuthorizedIdentifierDelegation] = { getIdentifierDelegationsForUid(uid) .find(aid => - authKeys(aid.mapping.target.id) && graph.areValidAuthorizationKeys( + authKeys(aid.mapping.target.id) && graph.existsAuthorizedKeyIn( aid.signingKeys, requireRoot = false, ) @@ -254,9 +254,7 @@ trait TransactionAuthorizationValidator { namespace: Namespace ): AuthorizationCheck = { val decentralizedNamespaceCheck = decentralizedNamespaceCache.get(namespace).map(_._2) - val namespaceCheck = namespaceCache.get( - namespace - ) + val namespaceCheck = namespaceCache.get(namespace) decentralizedNamespaceCheck .orElse(namespaceCheck) .getOrElse(AuthorizationCheck.empty) diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala index 5d8732703..21f758fb0 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionRejection.scala @@ -10,14 +10,9 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.OnboardingRestriction +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.EffectiveTime import com.digitalasset.canton.topology.transaction.TopologyMapping -import com.digitalasset.canton.topology.{ - DomainId, - Member, - ParticipantId, - PartyId, - TopologyManagerError, -} sealed trait TopologyTransactionRejection extends PrettyPrinting with Product with Serializable { def asString: String @@ -45,25 +40,12 @@ object TopologyTransactionRejection { TopologyManagerError.UnauthorizedTransaction.Failure(asString) } - final case class ThresholdTooHigh(actual: Int, mustBeAtMost: Int) - extends TopologyTransactionRejection { - override def asString: String = - s"Threshold must not be higher than $mustBeAtMost, but was $actual." - - override def pretty: Pretty[ThresholdTooHigh] = prettyOfString(_ => asString) - - override def toTopologyManagerError(implicit elc: ErrorLoggingContext) = { - TopologyManagerError.InvalidThreshold.ThresholdTooHigh(actual, mustBeAtMost) - } - } - final case class UnknownParties(parties: Seq[PartyId]) extends TopologyTransactionRejection { override def asString: String = s"Parties ${parties.sorted.mkString(", ")} are unknown." 
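The renamings above sharpen the distinction between keys that merely signed the transaction and keys that actually contributed an authorization; the difference is treated as superfluous and stripped before signature verification. In plain set terms (illustrative values):

```scala
// Keys that granted no authorization are superfluous and their signatures are dropped.
val signingKeys = Set("k1", "k2", "k3")
val allKeysUsedForAuthorization = Set("k1", "k2")
val superfluousKeys = signingKeys -- allKeysUsedForAuthorization // Set("k3")
// If stripping these signatures leaves none to verify, the transaction is
// rejected with NoDelegationFoundForKeys.
```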
override def pretty: Pretty[UnknownParties.this.type] = prettyOfString(_ => asString) override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = TopologyManagerError.UnknownParties.Failure(parties) - } final case class OnboardingRestrictionInPlace( @@ -192,6 +174,25 @@ object TopologyTransactionRejection { ) } + final case class PartyExceedsHostingLimit( + partyId: PartyId, + limit: Int, + numParticipants: Int, + ) extends TopologyTransactionRejection { + override def asString: String = + s"Party $partyId exceeds hosting limit of $limit with desired number of $numParticipants hosting participants." + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.PartyExceedsHostingLimit.Reject(partyId, limit, numParticipants) + + override def pretty: Pretty[PartyExceedsHostingLimit.this.type] = + prettyOfClass( + param("partyId", _.partyId), + param("limit", _.limit), + param("number of hosting participants", _.numParticipants), + ) + } + final case class MissingMappings(missing: Map[Member, Seq[TopologyMapping.Code]]) extends TopologyTransactionRejection { override def asString: String = { @@ -209,4 +210,24 @@ object TopologyTransactionRejection { override def pretty: Pretty[MissingMappings.this.type] = prettyOfString(_ => asString) } + + final case class MissingDomainParameters(effective: EffectiveTime) + extends TopologyTransactionRejection { + override def asString: String = s"Missing domain parameters at $effective" + + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.MissingTopologyMapping.MissingDomainParameters(effective) + + override def pretty: Pretty[MissingDomainParameters.this.type] = prettyOfString(_ => asString) + } + + final case class NamespaceAlreadyInUse(namespace: Namespace) + extends TopologyTransactionRejection { + override def asString: String = s"The namespace $namespace is already used by another entity." 
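Each rejection follows the same three-member pattern: a human-readable `asString`, a `pretty` instance, and a mapping to a `TopologyManagerError`. A hypothetical additional case sketching that shape (not part of this change set; the error mapping reuses a code that already appears in this file):

```scala
// Hypothetical rejection illustrating the common shape of the hierarchy.
final case class ExampleRejection(detail: String) extends TopologyTransactionRejection {
  override def asString: String = s"Example rejection: $detail"

  override def pretty: Pretty[ExampleRejection.this.type] = prettyOfString(_ => asString)

  override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError =
    TopologyManagerError.UnauthorizedTransaction.Failure(asString)
}
```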
+ + override def toTopologyManagerError(implicit elc: ErrorLoggingContext): TopologyManagerError = + TopologyManagerError.NamespaceAlreadyInUse.Reject(namespace) + + override def pretty: Pretty[NamespaceAlreadyInUse.this.type] = prettyOfString(_ => asString) + } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala index c74fd1a00..63d7d0e04 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMapping.scala @@ -401,14 +401,14 @@ object NamespaceDelegation { target: SigningPublicKey, isRootDelegation: Boolean, ): NamespaceDelegation = - create(namespace, target, isRootDelegation).fold(err => sys.error(err), identity) + create(namespace, target, isRootDelegation).valueOr(err => + throw new IllegalArgumentException(err) + ) def code: TopologyMapping.Code = Code.NamespaceDelegation /** Returns true if the given transaction is a self-signed root certificate */ def isRootCertificate(sit: GenericSignedTopologyTransaction): Boolean = { - ((sit.operation == TopologyChangeOp.Replace && sit.serial == PositiveInt.one) || - (sit.operation == TopologyChangeOp.Remove && sit.serial != PositiveInt.one)) && sit.mapping .select[transaction.NamespaceDelegation] .exists(ns => @@ -944,8 +944,8 @@ final case class PartyHostingLimits( override def code: Code = Code.PartyHostingLimits - override def namespace: Namespace = domainId.namespace - override def maybeUid: Option[UniqueIdentifier] = Some(domainId.uid) + override def namespace: Namespace = partyId.namespace + override def maybeUid: Option[UniqueIdentifier] = Some(partyId.uid) override def restrictedToDomain: Option[DomainId] = Some(domainId) @@ -1057,7 +1057,7 @@ object HostingParticipant { } yield HostingParticipant(participantId, permission) } -final case class PartyToParticipant( +final case class PartyToParticipant private ( partyId: PartyId, domainId: Option[DomainId], threshold: PositiveInt, @@ -1135,6 +1135,51 @@ final case class PartyToParticipant( object PartyToParticipant { + def create( + partyId: PartyId, + domainId: Option[DomainId], + threshold: PositiveInt, + participants: Seq[HostingParticipant], + groupAddressing: Boolean, + ): Either[String, PartyToParticipant] = { + val noDuplicateParticipants = { + val duplicatePermissions = + participants.groupBy(_.participantId).values.filter(_.size > 1).toList + Either.cond( + duplicatePermissions.isEmpty, + (), + s"Participants may only be assigned one permission: $duplicatePermissions", + ) + } + val thresholdCanBeMet = { + val numConfirmingParticipants = + participants.count(_.permission >= ParticipantPermission.Confirmation) + Either + .cond( + // we allow the threshold criteria to be unmet if there are only observing participants, + // but as soon as there is one confirming participant, the threshold must be theoretically satisfiable; + // otherwise the party can never confirm a transaction.
+ numConfirmingParticipants == 0 || threshold.value <= numConfirmingParticipants, + (), + s"Party $partyId cannot meet threshold of $threshold confirming participants with participants $participants", + ) + .map(_ => PartyToParticipant(partyId, domainId, threshold, participants, groupAddressing)) + } + + noDuplicateParticipants.flatMap(_ => thresholdCanBeMet) + } + + def tryCreate( + partyId: PartyId, + domainId: Option[DomainId], + threshold: PositiveInt, + participants: Seq[HostingParticipant], + groupAddressing: Boolean, + ): PartyToParticipant = + create(partyId, domainId, threshold, participants, groupAddressing).valueOr(err => + throw new IllegalArgumentException(err) + ) + def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash = TopologyMapping.buildUniqueKey(code)( _.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive)) @@ -1158,7 +1203,7 @@ object PartyToParticipant { } // AuthorityOf -final case class AuthorityOf( +final case class AuthorityOf private ( partyId: PartyId, domainId: Option[DomainId], threshold: PositiveInt, @@ -1199,6 +1244,21 @@ final case class AuthorityOf( object AuthorityOf { + def create( + partyId: PartyId, + domainId: Option[DomainId], + threshold: PositiveInt, + parties: Seq[PartyId], + ): Either[String, AuthorityOf] = { + Either + .cond( + threshold.value <= parties.size, + (), + s"Invalid threshold $threshold for $partyId with authorizers $parties", + ) + .map(_ => AuthorityOf(partyId, domainId, threshold, parties)) + } + def uniqueKey(partyId: PartyId, domainId: Option[DomainId]): MappingHash = TopologyMapping.buildUniqueKey(code)( _.add(partyId.toProtoPrimitive).add(domainId.fold("")(_.toProtoPrimitive)) @@ -1217,7 +1277,9 @@ object AuthorityOf { if (value.domain.nonEmpty) DomainId.fromProtoPrimitive(value.domain, "domain").map(_.some) else Right(None) - } yield AuthorityOf(partyId, domainId, threshold, parties) + authorityOf <- create(partyId, domainId, threshold, parties) + .leftMap(ProtoDeserializationError.OtherError) + } yield authorityOf } /** Dynamic domain parameter settings for the domain diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala index 394cfddb0..490cf30d0 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyMappingChecks.scala @@ -5,14 +5,19 @@ package com.digitalasset.canton.topology.transaction import cats.data.EitherT import cats.instances.future.* +import cats.instances.order.* import cats.syntax.semigroup.* import com.digitalasset.canton.crypto.KeyPurpose import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.protocol.OnboardingRestriction +import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.processing.EffectiveTime import com.digitalasset.canton.topology.store.StoredTopologyTransactions.PositiveStoredTopologyTransactions +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ + InvalidTopologyMapping, + NamespaceAlreadyInUse, +} import com.digitalasset.canton.topology.store.{ TopologyStore, TopologyStoreId, @@ -24,7 +29,6 @@ import
com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil import scala.concurrent.{ExecutionContext, Future} -import scala.math.Ordered.* trait TopologyMappingChecks { def checkTransaction( @@ -127,6 +131,27 @@ class ValidatingTopologyMappingChecks( .select[TopologyChangeOp.Replace, AuthorityOf] .map(checkAuthorityOf(effective, _)) + case ( + Code.DecentralizedNamespaceDefinition, + None | Some(Code.DecentralizedNamespaceDefinition), + ) => + toValidate + .select[TopologyChangeOp.Replace, DecentralizedNamespaceDefinition] + .map( + checkDecentralizedNamespaceDefinitionReplace( + _, + inStore.flatMap(_.select[TopologyChangeOp, DecentralizedNamespaceDefinition]), + ) + ) + + case ( + Code.NamespaceDelegation, + None | Some(Code.NamespaceDelegation), + ) => + toValidate + .select[TopologyChangeOp.Replace, NamespaceDelegation] + .map(checkNamespaceDelegationReplace) + case otherwise => None } @@ -190,6 +215,33 @@ class ValidatingTopologyMappingChecks( ensureParticipantDoesNotHostParties(effective, toValidate.mapping.participantId) } + private def loadDomainParameters( + effective: EffectiveTime + )(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyTransactionRejection, DynamicDomainParameters] = { + loadFromStore(effective, DomainParametersState.code).subflatMap { domainParamCandidates => + val params = domainParamCandidates.result.view + .flatMap(_.selectMapping[DomainParametersState]) + .map(_.mapping.parameters) + .toList + params match { + case Nil => + logger.error( + "Cannot determine domain parameters." + ) + Left(TopologyTransactionRejection.MissingDomainParameters(effective)) + case param :: Nil => Right(param) + case param :: rest => + logger.error( + s"Found ${rest.size + 1} domain parameters at ${effective}. Using the first one: $param." + ) + Right(param) + } + } + + } + private def checkDomainTrustCertificateReplace( effective: EffectiveTime, toValidate: SignedTopologyTransaction[TopologyChangeOp, DomainTrustCertificate], @@ -199,25 +251,7 @@ class ValidatingTopologyMappingChecks( def loadOnboardingRestriction() : EitherT[Future, TopologyTransactionRejection, OnboardingRestriction] = { - loadFromStore(effective, DomainParametersState.code).map { domainParamCandidates => - val restrictions = domainParamCandidates.result.view - .flatMap(_.selectMapping[DomainParametersState]) - .map(_.mapping.parameters.onboardingRestriction) - .toList - restrictions match { - case Nil => - logger.error( - "Can not determine the onboarding restriction. Assuming the domain is locked." - ) - OnboardingRestriction.RestrictedLocked - case param :: Nil => param - case param :: rest => - logger.error( - s"Multiple domain parameters at ${effective} ${rest.size + 1}. Using first one with restriction ${param}."
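`loadDomainParameters` resolves store results with an "expect exactly one" policy: no hit rejects the transaction, multiple hits are logged and the first wins. The shape in isolation (generic sketch, not the actual Canton helper):

```scala
// Generic sketch of the Nil / one / many resolution used above.
def pickUnique[A](results: List[A]): Either[String, A] = results match {
  case Nil         => Left("no mapping found") // becomes MissingDomainParameters upstream
  case one :: Nil  => Right(one)               // the expected case
  case first :: _  => Right(first)             // logged as an error; the first one wins
}
```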
- ) - param - } - } + loadDomainParameters(effective).map(_.onboardingRestriction) } def checkDomainIsNotLocked(restriction: OnboardingRestriction) = { @@ -311,65 +345,97 @@ class ValidatingTopologyMappingChecks( traceContext: TraceContext ): EitherT[Future, TopologyTransactionRejection, Unit] = { import toValidate.mapping - val numConfirmingParticipants = - mapping.participants.count(_.permission >= ParticipantPermission.Confirmation) + def checkParticipants() = { + val newParticipants = mapping.participants.map(_.participantId).toSet -- + inStore.toList.flatMap(_.mapping.participants.map(_.participantId)) + for { + participantTransactions <- EitherT.right[TopologyTransactionRejection]( + store + .findPositiveTransactions( + CantonTimestamp.MaxValue, + asOfInclusive = false, + isProposal = false, + types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code), + filterUid = Some(newParticipants.toSeq.map(_.uid)), + filterNamespace = None, + ) + ) - for { - // check the threshold - _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( - mapping.threshold.value <= numConfirmingParticipants, - TopologyTransactionRejection.ThresholdTooHigh( - mapping.threshold.value, - numConfirmingParticipants, - ), - ) + // check that all participants are known on the domain + missingParticipantCertificates = newParticipants -- participantTransactions + .collectOfMapping[DomainTrustCertificate] + .result + .map(_.mapping.participantId) - newParticipants = mapping.participants.map(_.participantId).toSet -- - inStore.toList.flatMap(_.mapping.participants.map(_.participantId)) - participantTransactions <- EitherT.right[TopologyTransactionRejection]( - store - .findPositiveTransactions( - CantonTimestamp.MaxValue, - asOfInclusive = false, - isProposal = false, - types = Seq(DomainTrustCertificate.code, OwnerToKeyMapping.code), - filterUid = Some(newParticipants.toSeq.map(_.uid)), - filterNamespace = None, - ) - ) + _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + missingParticipantCertificates.isEmpty, + TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq), + ) - // check that all participants are known on the domain - missingParticipantCertificates = newParticipants -- participantTransactions - .collectOfMapping[DomainTrustCertificate] - .result - .map(_.mapping.participantId) + // check that all known participants have keys registered + participantsWithInsufficientKeys = + newParticipants -- participantTransactions + .collectOfMapping[OwnerToKeyMapping] + .result + .view + .filter { tx => + val keyPurposes = tx.mapping.keys.map(_.purpose).toSet + requiredKeyPurposes.forall(keyPurposes) + } + .map(_.mapping.member) + .collect { case pid: ParticipantId => pid } + .toSeq - _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( - missingParticipantCertificates.isEmpty, - TopologyTransactionRejection.UnknownMembers(missingParticipantCertificates.toSeq), + _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + participantsWithInsufficientKeys.isEmpty, + TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq), + ) + } yield { + () + } + } + + def checkHostingLimits(effective: EffectiveTime) = for { + hostingLimitsCandidates <- loadFromStore( + effective, + code = PartyHostingLimits.code, + filterUid = Some(Seq(toValidate.mapping.partyId.uid)), ) + hostingLimits = hostingLimitsCandidates.result.view + .flatMap(_.selectMapping[PartyHostingLimits]) + .map(_.mapping.quota) + .toList + 
partyHostingLimit = hostingLimits match { + case Nil => // No hosting limits found. This is expected if no restrictions are in place + None + case quota :: Nil => Some(quota) + case multiple @ (quota :: _) => + logger.error( + s"Multiple PartyHostingLimits at ${effective} ${multiple.size}. Using first one with quota $quota." + ) + Some(quota) + } + // TODO(#14050) load default party hosting limits from dynamic domain parameters in case the party + // doesn't have a specific PartyHostingLimits mapping issued by the domain. + _ <- partyHostingLimit match { + case Some(limit) => + EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( + toValidate.mapping.participants.size <= limit, + TopologyTransactionRejection.PartyExceedsHostingLimit( + toValidate.mapping.partyId, + limit, + toValidate.mapping.participants.size, + ), + ) + case None => EitherTUtil.unit[TopologyTransactionRejection] + } + } yield () - // check that all known participants have keys registered - participantsWithInsufficientKeys = - newParticipants -- participantTransactions - .collectOfMapping[OwnerToKeyMapping] - .result - .view - .filter { tx => - val keyPurposes = tx.mapping.keys.map(_.purpose).toSet - requiredKeyPurposes.forall(keyPurposes) - } - .map(_.mapping.member) - .collect { case pid: ParticipantId => pid } - .toSeq + for { + _ <- checkParticipants() + _ <- checkHostingLimits(EffectiveTime.MaxValue) + } yield () - _ <- EitherTUtil.condUnitET[Future][TopologyTransactionRejection]( - participantsWithInsufficientKeys.isEmpty, - TopologyTransactionRejection.InsufficientKeys(participantsWithInsufficientKeys.toSeq), - ) - } yield { - () - } } private def checkOwnerToKeyMappingReplace( @@ -465,15 +531,7 @@ class ValidatingTopologyMappingChecks( val newMediators = (toValidate.mapping.allMediatorsInGroup.toSet -- inStore.toList.flatMap( _.mapping.allMediatorsInGroup )).map(identity[Member]) - - val thresholdCheck = EitherTUtil.condUnitET( - toValidate.mapping.threshold.value <= toValidate.mapping.active.size, - TopologyTransactionRejection.ThresholdTooHigh( - toValidate.mapping.threshold.value, - toValidate.mapping.active.size, - ), - ) - thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newMediators)) + checkMissingNsdAndOtkMappings(effectiveTime, newMediators) } private def checkSequencerDomainStateReplace( @@ -485,14 +543,7 @@ class ValidatingTopologyMappingChecks( _.mapping.allSequencers )).map(identity[Member]) - val thresholdCheck = EitherTUtil.condUnitET( - toValidate.mapping.threshold.value <= toValidate.mapping.active.size, - TopologyTransactionRejection.ThresholdTooHigh( - toValidate.mapping.threshold.value, - toValidate.mapping.active.size, - ), - ) - thresholdCheck.flatMap(_ => checkMissingNsdAndOtkMappings(effectiveTime, newSequencers)) + checkMissingNsdAndOtkMappings(effectiveTime, newSequencers) } private def checkAuthorityOf( @@ -521,15 +572,85 @@ class ValidatingTopologyMappingChecks( } } - val checkThreshold = { - val actual = toValidate.mapping.threshold.value - val mustBeAtMost = toValidate.mapping.parties.size - EitherTUtil.condUnitET( - actual <= mustBeAtMost, - TopologyTransactionRejection.ThresholdTooHigh(actual, mustBeAtMost), - ) + checkPartiesAreKnown() + } + + private def checkDecentralizedNamespaceDefinitionReplace( + toValidate: SignedTopologyTransaction[ + TopologyChangeOp.Replace, + DecentralizedNamespaceDefinition, + ], + inStore: Option[SignedTopologyTransaction[ + TopologyChangeOp, + DecentralizedNamespaceDefinition, + ]], + )(implicit traceContext: 
TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = { + + def checkDecentralizedNamespaceDerivedFromOwners() + : EitherT[Future, TopologyTransactionRejection, Unit] = + if (inStore.isEmpty) { + // The very first decentralized namespace definition must have namespace computed from the owners + EitherTUtil.condUnitET( + toValidate.mapping.namespace == DecentralizedNamespaceDefinition + .computeNamespace(toValidate.mapping.owners), + InvalidTopologyMapping( + s"The decentralized namespace ${toValidate.mapping.namespace} is not derived from the owners ${toValidate.mapping.owners.toSeq.sorted}" + ), + ) + } else { + EitherTUtil.unit + } + + def checkNoClashWithRootCertificates()(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyTransactionRejection, Unit] = { + loadFromStore( + EffectiveTime.MaxValue, + Code.NamespaceDelegation, + filterUid = None, + filterNamespace = Some(Seq(toValidate.mapping.namespace)), + ).flatMap { namespaceDelegations => + val foundRootCertWithSameNamespace = namespaceDelegations.result.exists(stored => + NamespaceDelegation.isRootCertificate(stored.transaction) + ) + EitherTUtil.condUnitET( + !foundRootCertWithSameNamespace, + NamespaceAlreadyInUse(toValidate.mapping.namespace), + ) + } + } + + for { + _ <- checkDecentralizedNamespaceDerivedFromOwners() + _ <- checkNoClashWithRootCertificates() + } yield () + } + + private def checkNamespaceDelegationReplace( + toValidate: SignedTopologyTransaction[ + TopologyChangeOp.Replace, + NamespaceDelegation, + ] + )(implicit traceContext: TraceContext): EitherT[Future, TopologyTransactionRejection, Unit] = { + def checkNoClashWithDecentralizedNamespaces()(implicit + traceContext: TraceContext + ): EitherT[Future, TopologyTransactionRejection, Unit] = { + EitherTUtil.ifThenET(NamespaceDelegation.isRootCertificate(toValidate)) { + loadFromStore( + EffectiveTime.MaxValue, + Code.DecentralizedNamespaceDefinition, + filterUid = None, + filterNamespace = Some(Seq(toValidate.mapping.namespace)), + ).flatMap { dns => + val foundDecentralizedNamespaceWithSameNamespace = dns.result.nonEmpty + EitherTUtil.condUnitET( + !foundDecentralizedNamespaceWithSameNamespace, + NamespaceAlreadyInUse(toValidate.mapping.namespace), + ) + } + } } - checkThreshold.flatMap(_ => checkPartiesAreKnown()) + checkNoClashWithDecentralizedNamespaces() } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/tracing/TracedScaffeine.scala b/community/base/src/main/scala/com/digitalasset/canton/tracing/TracedScaffeine.scala index 18ca2850e..3dcc9e9dc 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/tracing/TracedScaffeine.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/tracing/TracedScaffeine.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.tracing import com.daml.scalautil.Statement.discard import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.lifecycle.FutureUnlessShutdownImpl.AbortedDueToShutdownException import com.digitalasset.canton.logging.TracedLogger import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine} @@ -75,8 +74,10 @@ class TracedAsyncLoadingCache[K, V]( )(tracedLogger: TracedLogger) { implicit private[this] val ec: ExecutionContext = DirectExecutionContext(tracedLogger) - /** @see com.github.blemale.scaffeine.AsyncLoadingCache.get - */ + /* + * See com.github.blemale.scaffeine.AsyncLoadingCache.get + * If shutting down the future returned will be 
failed with an AbortedDueToShutdownException + */ def get(key: K)(implicit traceContext: TraceContext): Future[V] = underlying.get(TracedKey(key)(traceContext)) @@ -85,12 +86,14 @@ class TracedAsyncLoadingCache[K, V]( discard(underlying.synchronous().asMap().filterInPlace((t, v) => !filter(t.key, v))) } - def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] = { + def getUS(key: K)(implicit traceContext: TraceContext): FutureUnlessShutdown[V] = FutureUnlessShutdown.transformAbortedF(get(key)) - } - /** @see com.github.blemale.scaffeine.AsyncLoadingCache.getAll - */ + /* + * See com.github.blemale.scaffeine.AsyncLoadingCache.getAll + * If shutting down, the returned future will fail with an AbortedDueToShutdownException wrapped inside + * a java.util.concurrent.CompletionException + */ def getAll(keys: Iterable[K])(implicit traceContext: TraceContext): Future[Map[K, V]] = underlying .getAll(keys.map(TracedKey(_)(traceContext))) @@ -98,16 +101,9 @@ class TracedAsyncLoadingCache[K, V]( def getAllUS( keys: Iterable[K] - )(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] = - try - FutureUnlessShutdown.outcomeF( - underlying - .getAll(keys.map(TracedKey(_)(traceContext))) - .map(_.map { case (tracedKey, value) => tracedKey.key -> value })(ec) - ) - catch { - case _: AbortedDueToShutdownException => FutureUnlessShutdown.abortedDueToShutdown - } + )(implicit traceContext: TraceContext): FutureUnlessShutdown[Map[K, V]] = { + FutureUnlessShutdown.transformAbortedF(getAll(keys)) + } override def toString = s"TracedAsyncLoadingCache($underlying)" } diff --git a/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala b/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala index 68bef892d..b7c25d564 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/version/HasVersionedWrapper.scala @@ -62,7 +62,10 @@ trait HasVersionedWrapper[ValueClass] extends HasVersionedToByteString { def toByteArray(version: ProtocolVersion): Array[Byte] = toByteString(version).toByteArray /** Writes the byte string representation of the corresponding `UntypedVersionedMessage` wrapper of this instance to a file.
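Both `getUS` and `getAllUS` now funnel through `FutureUnlessShutdown.transformAbortedF`, which turns the shutdown exception back into the aborted state in one place instead of handling it at each call site. A self-contained analogue of that pattern (these names mimic, but are not, Canton's actual types):

```scala
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NoStackTrace

object AbortedDueToShutdownException extends RuntimeException with NoStackTrace

sealed trait UnlessShutdown[+A]
case object AbortedDueToShutdown extends UnlessShutdown[Nothing]
final case class Outcome[+A](value: A) extends UnlessShutdown[A]

// Recover the dedicated shutdown exception into the aborted state.
def transformAborted[A](f: Future[A])(implicit ec: ExecutionContext): Future[UnlessShutdown[A]] =
  f.map[UnlessShutdown[A]](Outcome(_))
    .recover { case AbortedDueToShutdownException => AbortedDueToShutdown }
```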
*/ - def writeToFile(outputFile: String, version: ProtocolVersion = ProtocolVersion.latest): Unit = { + def writeToFile( + outputFile: String, + version: ProtocolVersion = ProtocolVersion.latest, + ): Unit = { val bytes = toByteString(version) BinaryFileUtil.writeByteStringToFile(outputFile, bytes) } diff --git a/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala b/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala index 52761dcef..948e6c1a7 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/version/ProtocolVersion.scala @@ -9,7 +9,14 @@ import com.digitalasset.canton.ProtoDeserializationError.OtherError import com.digitalasset.canton.buildinfo.BuildInfo import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.version.ProtocolVersion.{deleted, deprecated, supported, unstable} +import com.digitalasset.canton.version.ProtocolVersion.{ + beta, + deleted, + deprecated, + stable, + supported, + unstable, +} import pureconfig.error.FailureReason import pureconfig.{ConfigReader, ConfigWriter} import slick.jdbc.{GetResult, PositionedParameters, SetParameter} @@ -48,7 +55,13 @@ import slick.jdbc.{GetResult, PositionedParameters, SetParameter} * As a result, you may have to modify a couple of protobuf definitions and mark them as stable as well. * * - Remove `v` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]] - * and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.protocolVersions]]. + * and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.stableProtocolVersions]]. + * + * How to release a protocol version `N` as Beta: + * - Switch the type parameter of the protocol version constant `v` from + * [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Unstable]] to [[com.digitalasset.canton.version.ProtocolVersionAnnotation.Beta]] + * - Remove `v` from [[com.digitalasset.canton.version.ProtocolVersion.unstable]] + * and add it to [[com.digitalasset.canton.buildinfo.BuildInfo.betaProtocolVersions]]. 
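Following the recipe above, promoting a hypothetical protocol version 32 to Beta would amount to switching its constructor and moving it to `BuildInfo.betaProtocolVersions`. Illustrative only: no such constant exists in this patch, and since `createBeta` is `private[version]`, the declaration would sit next to the other constants:

```scala
// Hypothetical Beta-status constant after the switch described above.
lazy val v32: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta] =
  ProtocolVersion.createBeta(32)
```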
* * - Check the test jobs for protocol versions: * Likely `N` will become the default protocol version used by the `test` job, @@ -67,7 +80,10 @@ sealed case class ProtocolVersion private[version] (v: Int) def isDeprecated: Boolean = deprecated.contains(this) def isUnstable: Boolean = unstable.contains(this) - def isStable: Boolean = !isUnstable + + def isBeta: Boolean = beta.contains(this) + + def isStable: Boolean = stable.contains(this) def isDeleted: Boolean = deleted.contains(this) @@ -90,12 +106,18 @@ object ProtocolVersion { type Status = S } - private[version] def stable(v: Int): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Stable] = + private[version] def createStable( + v: Int + ): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Stable] = createWithStatus[ProtocolVersionAnnotation.Stable](v) - private[version] def unstable( + private[version] def createUnstable( v: Int ): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] = createWithStatus[ProtocolVersionAnnotation.Unstable](v) + private[version] def createBeta( + v: Int + ): ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta] = + createWithStatus[ProtocolVersionAnnotation.Beta](v) private def createWithStatus[S <: ProtocolVersionAnnotation.Status]( v: Int @@ -121,12 +143,9 @@ object ProtocolVersion { pv: ProtocolVersion, includeDeleted: Boolean = false, ) = { - val supportedStablePVs = stableAndSupported.map(_.toString) + val deleted = Option.when(includeDeleted)(ProtocolVersion.deleted.forgetNE).getOrElse(Nil) - val supportedPVs = if (includeDeleted) { - val deletedPVs = deleted.map(pv => s"(${pv.toString})") - supportedStablePVs ++ deletedPVs - } else supportedStablePVs + val supportedPVs: NonEmpty[List[String]] = (supported ++ deleted).map(_.toString) s"Protocol version $pv is not supported. The supported versions are ${supportedPVs.mkString(", ")}." } @@ -201,13 +220,11 @@ object ProtocolVersion { // All stable protocol versions supported by this release // TODO(#15561) Switch to non-empty again - val stableAndSupported: List[ProtocolVersion] = - BuildInfo.protocolVersions - .map(parseUnchecked) - .map(_.valueOr(sys.error)) - .toList + val stable: List[ProtocolVersion] = + parseFromBuildInfo(BuildInfo.stableProtocolVersions.toSeq) private val deprecated: Seq[ProtocolVersion] = Seq() + private val deleted: NonEmpty[Seq[ProtocolVersion]] = NonEmpty( Seq, @@ -222,27 +239,36 @@ object ProtocolVersion { val unstable: NonEmpty[List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable]]] = NonEmpty.mk(List, ProtocolVersion.v31, ProtocolVersion.dev) - val supported: NonEmpty[List[ProtocolVersion]] = (unstable ++ stableAndSupported).sorted + val beta: List[ProtocolVersionWithStatus[ProtocolVersionAnnotation.Beta]] = + parseFromBuildInfo(BuildInfo.betaProtocolVersions.toSeq) + .map(pv => ProtocolVersion.createBeta(pv.v)) + + val supported: NonEmpty[List[ProtocolVersion]] = (unstable ++ beta ++ stable).sorted - private val allProtocolVersions = deprecated ++ deleted ++ unstable ++ stableAndSupported + private val allProtocolVersions = deprecated ++ deleted ++ unstable ++ beta ++ stable require( allProtocolVersions.sizeCompare(allProtocolVersions.distinct) == 0, s"All the protocol versions should be distinct." 
+ - s"Found: ${Map("deprecated" -> deprecated, "deleted" -> deleted, "unstable" -> unstable, "stable" -> stableAndSupported)}", + s"Found: ${Map("deprecated" -> deprecated, "deleted" -> deleted, "unstable" -> unstable, "stable" -> stable)}", ) // TODO(i15561): change back to `stableAndSupported.max1` once there is a stable Daml 3 protocol version - val latest: ProtocolVersion = stableAndSupported.lastOption.getOrElse(unstable.head1) + val latest: ProtocolVersion = stable.lastOption.getOrElse(unstable.head1) lazy val dev: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] = - ProtocolVersion.unstable(Int.MaxValue) + ProtocolVersion.createUnstable(Int.MaxValue) lazy val v31: ProtocolVersionWithStatus[ProtocolVersionAnnotation.Unstable] = - ProtocolVersion.unstable(31) + ProtocolVersion.createUnstable(31) // Minimum stable protocol version introduced lazy val minimum: ProtocolVersion = v31 + + private def parseFromBuildInfo(pv: Seq[String]): List[ProtocolVersion] = + pv.map(parseUnchecked) + .map(_.valueOr(sys.error)) + .toList } /* diff --git a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/PackageVersion.java b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/PackageVersion.java new file mode 100644 index 000000000..0a7ea9ff9 --- /dev/null +++ b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/PackageVersion.java @@ -0,0 +1,65 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data; + +import org.checkerframework.checker.nullness.qual.NonNull; + +import java.util.Arrays; +import java.util.stream.Collectors; + +public class PackageVersion implements Comparable { + private final int[] segments; + + /** + * Creates a PackageVersion from the provided segments. + * + *

This method is meant only for internal API usage. It is marked unsafe as it does not + * validate the input according to the accepted ledger format of PackageVersion. + */ + public PackageVersion(int[] segments) { + this.segments = segments; + } + + /** + * Parses the provided String value into a PackageVersion. + * + *

This method is meant only for internal API usage. It is marked unsafe as it does not + * validate the input according to the accepted ledger format of PackageVersion. + */ + public static PackageVersion unsafeFromString(@NonNull String version) { + String[] parts = version.split("\\."); + int[] segments = new int[parts.length]; + for (int i = 0; i < parts.length; i++) { + segments[i] = Integer.parseInt(parts[i]); + if (segments[i] < 0) { + throw new IllegalArgumentException( + "Invalid version. No negative segments allowed: " + version); + } + } + return new PackageVersion(segments); + } + + @Override + public int compareTo(PackageVersion other) { + return Arrays.compare(this.segments, other.segments); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PackageVersion that = (PackageVersion) o; + return Arrays.equals(segments, that.segments); + } + + @Override + public int hashCode() { + return Arrays.hashCode(segments); + } + + @Override + public String toString() { + return Arrays.stream(segments).mapToObj(Integer::toString).collect(Collectors.joining(".")); + } +} diff --git a/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/PackageVersionSpec.scala b/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/PackageVersionSpec.scala new file mode 100644 index 000000000..ab0774aae --- /dev/null +++ b/community/bindings-java/src/test/scala/com/daml/ledger/javaapi/data/PackageVersionSpec.scala @@ -0,0 +1,43 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.daml.ledger.javaapi.data + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +import scala.util.Random + +class PackageVersionSpec extends AnyFlatSpec with Matchers { + + "PackageVersion" should "be parsed correctly from String" in { + val packageVersion = PackageVersion.unsafeFromString("1.22.333") + packageVersion.toString shouldBe "1.22.333" + packageVersion shouldBe new PackageVersion(Array(1, 22, 333)) + } + + "PackageVersion" should "not allow negative or non-integers" in { + an[IllegalArgumentException] should be thrownBy PackageVersion.unsafeFromString("0.-1") + an[IllegalArgumentException] should be thrownBy PackageVersion.unsafeFromString("0.beef") + } + + "PackageVersion" should "be ordered correctly" in { + + val expectedOrderedPackageVersions = Seq( + // Lowest possible package version + PackageVersion.unsafeFromString("0"), + PackageVersion.unsafeFromString("0.1"), + PackageVersion.unsafeFromString("0.11"), + PackageVersion.unsafeFromString("1.0"), + PackageVersion.unsafeFromString("2"), + PackageVersion.unsafeFromString("10"), + PackageVersion.unsafeFromString(s"${Int.MaxValue}"), + PackageVersion.unsafeFromString(s"${Int.MaxValue}.3"), + PackageVersion.unsafeFromString(s"${Int.MaxValue}." 
* 23 + "99"), + ) + + Random + .shuffle(expectedOrderedPackageVersions) + .sorted should contain theSameElementsInOrderAs expectedOrderedPackageVersions + } +} diff --git a/community/common/src/main/daml/CantonExamples/daml.yaml b/community/common/src/main/daml/CantonExamples/daml.yaml index ad8a8da17..50dd7d8af 100644 --- a/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: CantonExamples diff --git a/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql b/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql index 25bcea71c..191b97b81 100644 --- a/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql +++ b/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql @@ -650,9 +650,9 @@ create table sequencer_lower_bound ( create table sequencer_events ( ts bigint primary key, node_index smallint not null, - -- single char to indicate the event type: D for deliver event, E for deliver error + -- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt event_type char(1) not null - constraint event_type_enum check (event_type = 'D' or event_type = 'E'), + constraint event_type_enum check (event_type IN ('D', 'E', 'R')), message_id varchar null, sender integer null, -- null if event goes to everyone, otherwise specify member ids of recipients @@ -921,6 +921,8 @@ create table seq_traffic_control_consumed_journal ( extra_traffic_consumed bigint not null, -- base traffic remainder at sequencing_timestamp base_traffic_remainder bigint not null, + -- the last cost consumed at sequencing_timestamp + last_consumed_cost bigint not null, -- traffic entries have a unique sequencing_timestamp per member primary key (member, sequencing_timestamp) ); diff --git a/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 b/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 index e36b96aa9..033aefc9a 100644 --- a/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 +++ b/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sha256 @@ -1 +1 @@ -8347bf5092167e6a3df9d8f3cf1d0054a779e272589f7c0f3aad50cca8f8736a +1923effb9fa5d583e6c188f401e708a5e9c03b725ed988d0928a0b61660854a2 diff --git a/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql b/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql index bbc4bd3eb..7d59c1bef 100644 --- a/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql +++ b/community/common/src/main/resources/db/migration/canton/h2/stable/V2__lapi_3.0.sql @@ -81,7 +81,8 @@ CREATE TABLE lapi_command_completions ( trace_context BINARY LARGE OBJECT ); -CREATE INDEX lapi__command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset); +CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset); +CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset); 
--------------------------------------------------------------------------------------------------- -- Events: create diff --git a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql index f1027da3f..d227fdf5e 100644 --- a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql +++ b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql @@ -673,9 +673,9 @@ create table sequencer_lower_bound ( create table sequencer_events ( ts bigint primary key, node_index smallint not null, - -- single char to indicate the event type: D for deliver event, E for deliver error + -- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt event_type char(1) not null - constraint event_type_enum check (event_type = 'D' or event_type = 'E'), + constraint event_type_enum check (event_type IN ('D', 'E', 'R')), message_id varchar(300) collate "C" null, sender integer null, -- null if event goes to everyone, otherwise specify member ids of recipients @@ -935,6 +935,8 @@ create table seq_traffic_control_consumed_journal ( extra_traffic_consumed bigint not null, -- base traffic remainder at sequencing_timestamp base_traffic_remainder bigint not null, + -- the last cost consumed at sequencing_timestamp + last_consumed_cost bigint not null, -- traffic entries have a unique sequencing_timestamp per member primary key (member, sequencing_timestamp) ); diff --git a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 index d00f26676..27ad6db2b 100644 --- a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 +++ b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sha256 @@ -1 +1 @@ -22559de6824376d64006305601db270b57afafb1eccc05e041e55bf3cb858e30 +1f50894cad8a5ce3e65f5e6b0a48484d2cf0cd7cc354fc6b0aa9cdda97d9e6d3 diff --git a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql index dccdae829..3688421a7 100644 --- a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql +++ b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql @@ -669,7 +669,8 @@ create or replace view debug.seq_traffic_control_consumed_journal as member, debug.canton_timestamp(sequencing_timestamp) as sequencing_timestamp, extra_traffic_consumed, - base_traffic_remainder + base_traffic_remainder, + last_consumed_cost from seq_traffic_control_consumed_journal; create or replace view debug.seq_traffic_control_initial_timestamp as diff --git a/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 b/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 index af6ab36f0..e29ee521b 100644 --- a/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 +++ b/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sha256 @@ -1 +1 @@ -f4d58cc709e08a2081d761637ea8d27393decb4ed1a6f4ee8ecf4843a838eab0 
+d1c0b524698a1e1249785b0fe973f21f5542020215b49c4012bd774e310fb82e diff --git a/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql b/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql index 55665e5d6..73a3bc78b 100644 --- a/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql +++ b/community/common/src/main/resources/db/migration/canton/postgres/stable/V2_0__lapi_3.0.sql @@ -100,6 +100,7 @@ CREATE TABLE lapi_command_completions ( ); CREATE INDEX lapi_command_completions_application_id_offset_idx ON lapi_command_completions USING btree (application_id, completion_offset); +CREATE INDEX lapi_command_completions_offset_idx ON lapi_command_completions USING btree (completion_offset); --------------------------------------------------------------------------------------------------- -- Events: Assign diff --git a/community/common/src/main/scala/com/digitalasset/canton/config/ProtocolConfig.scala b/community/common/src/main/scala/com/digitalasset/canton/config/ProtocolConfig.scala index a196de580..382a698f7 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/config/ProtocolConfig.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/config/ProtocolConfig.scala @@ -5,5 +5,6 @@ package com.digitalasset.canton.config trait ProtocolConfig { def devVersionSupport: Boolean + def betaVersionSupport: Boolean def dontWarnOnDeprecatedPV: Boolean } diff --git a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionConfirmationRequest.scala b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionConfirmationRequest.scala index 8dec8307e..c44b86549 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionConfirmationRequest.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/protocol/messages/TransactionConfirmationRequest.scala @@ -12,7 +12,6 @@ import com.digitalasset.canton.sequencing.protocol.{ Batch, MediatorGroupRecipient, OpenEnvelope, - ParticipantsOfParty, Recipients, } import com.digitalasset.canton.topology.client.TopologySnapshot @@ -57,32 +56,13 @@ final case class TransactionConfirmationRequest( val rootHashMessageEnvelopes = NonEmpty.from(recipientsOfRootHashMessage) match { case Some(recipientsNE) => - // TODO(#13883) Use BCC also for group addresses - // val groupsWithMediator = - // recipientsOfRootHashMessage.map(recipient => NonEmpty(Set, recipient, mediatorRecipient)) - // val rootHashMessageEnvelope = OpenEnvelope( - // rootHashMessage, - // Recipients.recipientGroups(NonEmptyUtil.fromUnsafe(groupsWithMediator)), - // )(protocolVersion) - val groupAddressing = recipientsOfRootHashMessage.exists { - case ParticipantsOfParty(_) => true - case _ => false - } - // if using group addressing, we just place all recipients in one group instead of separately as before (it was separate for legacy reasons) - val rootHashMessageRecipients = - if (groupAddressing) - Recipients.recipientGroups( - NonEmpty.mk(Seq, recipientsNE.toSet ++ Seq(mediator)) - ) - else - Recipients.recipientGroups( - recipientsNE.map(NonEmpty.mk(Set, _, mediator)) - ) - List( - OpenEnvelope(rootHashMessage(ipsSnapshot.timestamp), rootHashMessageRecipients)( - protocolVersion - ) - ) + val groupsWithMediator = recipientsNE.map(NonEmpty(Set, _, mediator)) + val rootHashMessageEnvelope = OpenEnvelope( + rootHashMessage(ipsSnapshot.timestamp), + 
Recipients.recipientGroups(groupsWithMediator), + )(protocolVersion) + + List(rootHashMessageEnvelope) case None => loggingContext.warn("Confirmation request without root hash message recipients") List.empty diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala index 3c32c464f..8ffa39d47 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/QueueBasedDomainOutbox.scala @@ -117,7 +117,7 @@ class QueueBasedDomainOutbox( private def hasUnsentTransactions: Boolean = domainOutboxQueue.numUnsentTransactions > 0 - def newTransactionsAddedToAuthorizedStore( + def newTransactionsAdded( asOf: CantonTimestamp, num: Int, ): FutureUnlessShutdown[Unit] = { diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/StoreBasedDomainOutbox.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/StoreBasedDomainOutbox.scala index a636bdcf9..0643a01c3 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/topology/StoreBasedDomainOutbox.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/StoreBasedDomainOutbox.scala @@ -162,7 +162,7 @@ class StoreBasedDomainOutbox( final def queueSize: Int = watermarks.get().queuedApprox - final def newTransactionsAddedToAuthorizedStore( + final def newTransactionsAdded( asOf: CantonTimestamp, num: Int, ): FutureUnlessShutdown[Unit] = { @@ -375,7 +375,7 @@ abstract class DomainOutbox extends DomainOutboxHandle { def targetClient: DomainTopologyClientWithInit - def newTransactionsAddedToAuthorizedStore( + def newTransactionsAdded( asOf: CantonTimestamp, num: Int, ): FutureUnlessShutdown[Unit] @@ -396,7 +396,7 @@ class DomainOutboxDynamicObserver(val loggerFactory: NamedLoggerFactory) transactions: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { outboxRef.get.fold(FutureUnlessShutdown.unit)( - _.newTransactionsAddedToAuthorizedStore(timestamp, transactions.size) + _.newTransactionsAdded(timestamp, transactions.size) ) } diff --git a/community/common/src/main/scala/com/digitalasset/canton/version/ProtocolVersionCompatibility.scala b/community/common/src/main/scala/com/digitalasset/canton/version/ProtocolVersionCompatibility.scala index 506054587..a12047dc2 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/version/ProtocolVersionCompatibility.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/version/ProtocolVersionCompatibility.scala @@ -26,9 +26,12 @@ object ProtocolVersionCompatibility { cantonNodeParameters: CantonNodeParameters, release: ReleaseVersion = ReleaseVersion.current, ): NonEmpty[List[ProtocolVersion]] = { - val unstable = + val unstableAndBeta = if (cantonNodeParameters.devVersionSupport && cantonNodeParameters.nonStandardConfig) - ProtocolVersion.unstable.forgetNE + ProtocolVersion.unstable.forgetNE ++ ReleaseVersionToProtocolVersions + .getBetaProtocolVersions(release) + else if (cantonNodeParameters.betaVersionSupport) + ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release) else List.empty ReleaseVersionToProtocolVersions.getOrElse( @@ -36,15 +39,23 @@ object ProtocolVersionCompatibility { sys.error( s"Please add the supported protocol versions of a participant of release version 
$release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`." ), - ) ++ unstable + ) ++ unstableAndBeta } - /** Returns the protocol versions supported by the participant of the current release. + /** Returns the protocol versions supported by the participant of the specified release. + * includeUnstableVersions: include unstable versions + * includeBetaVersions: include Beta versions */ def supportedProtocolsParticipant( includeUnstableVersions: Boolean, + includeBetaVersions: Boolean, release: ReleaseVersion, ): NonEmpty[List[ProtocolVersion]] = { + val beta = + if (includeBetaVersions) + ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release) + else List.empty + val unstable = if (includeUnstableVersions) ProtocolVersion.unstable.forgetNE @@ -55,7 +66,7 @@ object ProtocolVersionCompatibility { sys.error( s"Please add the supported protocol versions of a participant of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`." ), - ) ++ unstable + ) ++ beta ++ unstable } /** Returns the protocol versions supported by the domain of the current release. @@ -65,9 +76,12 @@ object ProtocolVersionCompatibility { cantonNodeParameters: CantonNodeParameters, release: ReleaseVersion = ReleaseVersion.current, ): NonEmpty[List[ProtocolVersion]] = { - val unstable = + val unstableAndBeta = if (cantonNodeParameters.devVersionSupport && cantonNodeParameters.nonStandardConfig) - ProtocolVersion.unstable.forgetNE + ProtocolVersion.unstable.forgetNE ++ ReleaseVersionToProtocolVersions + .getBetaProtocolVersions(release) + else if (cantonNodeParameters.betaVersionSupport) + ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release) else List.empty ReleaseVersionToProtocolVersions.getOrElse( @@ -75,16 +89,23 @@ object ProtocolVersionCompatibility { sys.error( s"Please add the supported protocol versions of domain nodes of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`." ), - ) ++ unstable + ) ++ unstableAndBeta } - /** Returns the protocol versions supported by the domain of the current release. - * Fails if no stable protocol versions are found + /** Returns the protocol versions supported by the domain of the specified release. + * includeUnstableVersions: include unstable versions + * includeBetaVersions: include beta versions */ def trySupportedProtocolsDomain( includeUnstableVersions: Boolean, + includeBetaVersions: Boolean, release: ReleaseVersion, ): NonEmpty[List[ProtocolVersion]] = { + val beta = + if (includeBetaVersions) + ReleaseVersionToProtocolVersions.getBetaProtocolVersions(release) + else List.empty + val unstable = if (includeUnstableVersions) ProtocolVersion.unstable.forgetNE @@ -95,7 +116,7 @@ object ProtocolVersionCompatibility { sys.error( s"Please add the supported protocol versions of domain nodes of release version $release to `majorMinorToProtocolVersions` in `ReleaseVersionToProtocolVersions.scala`." 
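A note on the ProtocolVersionCompatibility hunks above: dev (unstable) protocol versions stay gated behind both devVersionSupport and nonStandardConfig, while the newly introduced beta versions ship either alongside dev support or on their own via betaVersionSupport. The following is a minimal, self-contained sketch of that gating rule; the case class and the hard-coded version lists are stand-ins for the real Canton types and the ReleaseVersionToProtocolVersions lookups, not the actual implementation.

    object VersionInclusionSketch {
      final case class ProtocolVersion(v: Int)

      // Hard-coded stand-ins for the ReleaseVersionToProtocolVersions lookups.
      val stable: List[ProtocolVersion] = List(ProtocolVersion(5), ProtocolVersion(30))
      val unstable: List[ProtocolVersion] = List(ProtocolVersion(31))
      val beta: List[ProtocolVersion] = List(ProtocolVersion(6))

      // Dev versions require both flags; beta versions ride along with dev
      // support or can be enabled on their own, mirroring the hunks above.
      def supported(
          devVersionSupport: Boolean,
          nonStandardConfig: Boolean,
          betaVersionSupport: Boolean,
      ): List[ProtocolVersion] =
        if (devVersionSupport && nonStandardConfig) stable ++ unstable ++ beta
        else if (betaVersionSupport) stable ++ beta
        else stable
    }

For example, supported(devVersionSupport = false, nonStandardConfig = false, betaVersionSupport = true) yields the stable list plus the beta list, which is the getBetaProtocolVersions branch in the hunk above.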
), - ) ++ unstable + ) ++ beta ++ unstable } final case class UnsupportedVersion(version: ProtocolVersion, supported: Seq[ProtocolVersion]) @@ -222,6 +243,7 @@ object DomainProtocolVersion { ProtocolVersionCompatibility .trySupportedProtocolsDomain( includeUnstableVersions = true, + includeBetaVersions = true, release = ReleaseVersion.current, ) .contains(version), @@ -229,7 +251,8 @@ object DomainProtocolVersion { UnsupportedVersion( version, ProtocolVersionCompatibility.trySupportedProtocolsDomain( - includeUnstableVersions = false, + includeUnstableVersions = true, + includeBetaVersions = true, release = ReleaseVersion.current, ), ), @@ -261,6 +284,7 @@ object ParticipantProtocolVersion { ProtocolVersionCompatibility .supportedProtocolsParticipant( includeUnstableVersions = true, + includeBetaVersions = true, release = ReleaseVersion.current, ) .contains(version), @@ -268,7 +292,8 @@ object ParticipantProtocolVersion { UnsupportedVersion( version, ProtocolVersionCompatibility.supportedProtocolsParticipant( - includeUnstableVersions = false, + includeUnstableVersions = true, + includeBetaVersions = true, release = ReleaseVersion.current, ), ), diff --git a/community/common/src/main/scala/com/digitalasset/canton/version/ReleaseVersionToProtocolVersions.scala b/community/common/src/main/scala/com/digitalasset/canton/version/ReleaseVersionToProtocolVersions.scala index f2f3a8d16..1a600b0ce 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/version/ReleaseVersionToProtocolVersions.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/version/ReleaseVersionToProtocolVersions.scala @@ -10,46 +10,42 @@ object ReleaseVersionToProtocolVersions { private val v3 = ProtocolVersion(3) private val v4 = ProtocolVersion(4) private val v5 = ProtocolVersion(5) + private val v6 = ProtocolVersion(6) private val v30 = ProtocolVersion(30) import ProtocolVersion.* // For each (major, minor) the list of supported protocol versions // Don't make this variable private because it's used in `console-reference.canton` - val majorMinorToProtocolVersions: Map[(Int, Int), NonEmpty[List[ProtocolVersion]]] = Map( - ReleaseVersions.v2_0_0 -> List(v2), - ReleaseVersions.v2_1_0 -> List(v2), - ReleaseVersions.v2_2_0 -> List(v2), - ReleaseVersions.v2_3_0 -> List(v2, v3), - ReleaseVersions.v2_4_0 -> List(v2, v3), - ReleaseVersions.v2_5_0 -> List(v2, v3, v4), - ReleaseVersions.v2_6_0 -> List(v3, v4), - ReleaseVersions.v2_7_0 -> List(v3, v4, v5), - ReleaseVersions.v2_8_0 -> List(v3, v4, v5), - ReleaseVersions.v2_9_0 -> List(v3, v4, v5), - ReleaseVersions.v3_0_0 -> List(v30), - ReleaseVersions.v3_1_0_snapshot -> List(v31), + val majorMinorToStableProtocolVersions: Map[(Int, Int), NonEmpty[List[ProtocolVersion]]] = + Map( + ReleaseVersions.v2_0_0 -> List(v2), + ReleaseVersions.v2_1_0 -> List(v2), + ReleaseVersions.v2_2_0 -> List(v2), + ReleaseVersions.v2_3_0 -> List(v2, v3), + ReleaseVersions.v2_4_0 -> List(v2, v3), + ReleaseVersions.v2_5_0 -> List(v2, v3, v4), + ReleaseVersions.v2_6_0 -> List(v3, v4), + ReleaseVersions.v2_7_0 -> List(v3, v4, v5), + ReleaseVersions.v2_8_0 -> List(v3, v4, v5), + ReleaseVersions.v2_9_0 -> List(v5), + ReleaseVersions.v3_0_0 -> List(v30), + ReleaseVersions.v3_1_0_snapshot -> List(v31), + ).map { case (release, pvs) => (release.majorMinor, NonEmptyUtil.fromUnsafe(pvs)) } + + val majorMinorToBetaProtocolVersions: Map[(Int, Int), NonEmpty[List[ProtocolVersion]]] = Map( + ReleaseVersions.v2_9_0 -> List(v6) ).map { case (release, pvs) => (release.majorMinor, 
NonEmptyUtil.fromUnsafe(pvs)) } - def get( - releaseVersion: ReleaseVersion, - includeDeletedProtocolVersions: Boolean = false, - ): Option[NonEmpty[List[ProtocolVersion]]] = { - val allVersions = majorMinorToProtocolVersions - .get(releaseVersion.majorMinor) - - if (includeDeletedProtocolVersions) - allVersions - else - majorMinorToProtocolVersions - .get(releaseVersion.majorMinor) - .map(_.filterNot(_.isDeleted)) - .flatMap(NonEmpty.from) - } - def getOrElse( releaseVersion: ReleaseVersion, default: => NonEmpty[List[ProtocolVersion]], ): NonEmpty[List[ProtocolVersion]] = - majorMinorToProtocolVersions.getOrElse(releaseVersion.majorMinor, default) + majorMinorToStableProtocolVersions.getOrElse(releaseVersion.majorMinor, default) + + def getBetaProtocolVersions(releaseVersion: ReleaseVersion): List[ProtocolVersion] = + majorMinorToBetaProtocolVersions + .get(releaseVersion.majorMinor) + .map(_.forgetNE) + .getOrElse(Nil) } diff --git a/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala b/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala index 5426326dc..7bc3e5ace 100644 --- a/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala +++ b/community/common/src/test/java/com/digitalasset/canton/sequencing/protocol/RecipientsTreeTest.scala @@ -19,7 +19,7 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest { lazy val p6: Member = ParticipantId("participant6") lazy val alice = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::party")) - lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"alice::bob")) + lazy val bob = PartyId(UniqueIdentifier.tryFromProtoPrimitive(s"bob::party")) lazy val pop1: ParticipantsOfParty = ParticipantsOfParty(alice) lazy val pop2: ParticipantsOfParty = ParticipantsOfParty(bob) @@ -51,6 +51,16 @@ class RecipientsTreeTest extends AnyWordSpec with BaseTest { t5.forMember(p5, Set(pop1)) shouldBe List(t5) } } + + "allPaths" should { + "give all paths within the tree" in { + t5.allPaths shouldBe Seq( + Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p1), rec(p5))), + Seq(Set(rec(p1), pop1), Set(rec(p4), rec(p2), pop2), Set(rec(p3))), + Seq(Set(rec(p1), pop1), Set(rec(p2), rec(p6), pop2)), + ) + } + } } "serialization and deserialization" should { diff --git a/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala b/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala index b9cc4bca6..7b0d3be7b 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTrafficData.scala @@ -53,12 +53,14 @@ final class GeneratorsTrafficData( extraTrafficLimit <- Arbitrary.arbitrary[NonNegativeLong] extraTrafficConsumed <- Arbitrary.arbitrary[NonNegativeLong] baseTrafficRemainder <- Arbitrary.arbitrary[NonNegativeLong] + lastConsumedCost <- Arbitrary.arbitrary[NonNegativeLong] timestamp <- Arbitrary.arbitrary[CantonTimestamp] serial <- Arbitrary.arbitrary[Option[PositiveInt]] } yield TrafficState( extraTrafficLimit, extraTrafficConsumed, baseTrafficRemainder, + lastConsumedCost, timestamp, serial, ) diff --git a/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala b/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala index 
5366767f2..4ddfdc484 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/protocol/messages/TopologyTransactionTest.scala @@ -91,7 +91,7 @@ class TopologyTransactionTest extends AnyWordSpec with BaseTest with HasCryptogr "party to participant" should { val p1 = mk( - PartyToParticipant( + PartyToParticipant.tryCreate( PartyId(uid), None, PositiveInt.one, @@ -102,12 +102,12 @@ class TopologyTransactionTest extends AnyWordSpec with BaseTest with HasCryptogr val p2 = mk( - PartyToParticipant( + PartyToParticipant.tryCreate( PartyId(uid), Some(domainId), PositiveInt.two, Seq( - HostingParticipant(ParticipantId(uid2), ParticipantPermission.Observation), + HostingParticipant(ParticipantId(uid2), ParticipantPermission.Confirmation), HostingParticipant(ParticipantId(uid), ParticipantPermission.Submission), ), groupAddressing = true, diff --git a/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala b/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala index a7070dedf..f98fbbfe3 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/sequencing/client/SequencerClientTest.scala @@ -789,6 +789,7 @@ class SequencerClientTest CantonTimestamp.MinValue.immediateSuccessor, trafficReceipt.extraTrafficConsumed, trafficReceipt.baseTrafficRemainder, + trafficReceipt.consumedCost, ) } @@ -838,6 +839,7 @@ class SequencerClientTest CantonTimestamp.MinValue.immediateSuccessor, trafficReceipt.extraTrafficConsumed, trafficReceipt.baseTrafficRemainder, + trafficReceipt.consumedCost, ) } @@ -1152,6 +1154,7 @@ class SequencerClientTest extraTrafficConsumed = NonNegativeLong.tryCreate(Math.abs(request.timestamp.toProtoPrimitive)), baseTrafficRemainder = NonNegativeLong.zero, + lastConsumedCost = NonNegativeLong.zero, timestamp = request.timestamp, serial = None, ) diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala index bbf3fa3fe..8992920d6 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/client/IdentityProvidingServiceClientTest.scala @@ -76,6 +76,11 @@ class PartyTopologySnapshotClientTest extends AsyncWordSpec with BaseTest { ): Future[Set[LfPartyId]] = ??? + override def activeParticipantsOfPartiesWithGroupAddressing( + parties: Seq[LfPartyId] + )(implicit traceContext: TraceContext): Future[Map[LfPartyId, Set[ParticipantId]]] = + ??? + override def consortiumThresholds( parties: Set[LfPartyId] )(implicit traceContext: TraceContext): Future[Map[LfPartyId, PositiveInt]] = ??? 
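The lastConsumedCost field threaded through the hunks above (the seq_traffic_control_consumed_journal column, the TrafficState generator, and the SequencerClientTest expectations) records the cost of the most recent deduction alongside the running totals. A simplified model of that bookkeeping follows; the field names mirror the patch, but the spend-base-traffic-first arithmetic is an assumption of this sketch rather than Canton's actual rate limiter.

    object TrafficAccountingSketch {
      final case class TrafficState(
          extraTrafficConsumed: Long,
          baseTrafficRemainder: Long,
          lastConsumedCost: Long,
      )

      def consume(state: TrafficState, cost: Long): TrafficState = {
        // Draw down the base-rate allowance first, then purchased extra traffic.
        val fromBase = math.min(state.baseTrafficRemainder, cost)
        val fromExtra = cost - fromBase
        state.copy(
          extraTrafficConsumed = state.extraTrafficConsumed + fromExtra,
          baseTrafficRemainder = state.baseTrafficRemainder - fromBase,
          // This is the value the new journal column persists per sequencing timestamp.
          lastConsumedCost = cost,
        )
      }
    }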
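Several hunks in this patch also replace bare PartyToParticipant(...) construction with PartyToParticipant.tryCreate(...). The invariant being enforced, which the Gen.choose(1, participants.count(_.permission >= ParticipantPermission.Confirmation).max(1)) generator later in the patch honors as well, is that the confirmation threshold must be reachable by participants that can actually confirm. A sketch with simplified stand-in types; the real check and its error reporting live in Canton's mapping code.

    object PartyToParticipantSketch {
      sealed trait Permission
      case object Observation extends Permission
      case object Confirmation extends Permission
      case object Submission extends Permission

      final case class HostingParticipant(participant: String, permission: Permission)

      // Observers cannot confirm, so they do not count towards the threshold.
      def tryCreate(
          threshold: Int,
          participants: Seq[HostingParticipant],
      ): Either[String, Seq[HostingParticipant]] = {
        val confirming = participants.count(_.permission != Observation)
        Either.cond(
          threshold >= 1 && threshold <= confirming,
          participants,
          s"threshold $threshold cannot be met by $confirming confirming participant(s)",
        )
      }
    }

Under this check, threshold = 2 with one Observation host and one Confirmation host is rejected, matching the threshold cases exercised in the topology mapping tests.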
diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientTest.scala index e5b80c352..ed59e7dc0 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/client/StoreBasedDomainTopologyClientTest.scala @@ -43,7 +43,7 @@ trait StoreBasedTopologySnapshotTest extends AsyncWordSpec with BaseTest with Ha import factory.TestingTransactions.* lazy val party1participant1 = mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1, None, PositiveInt.one, @@ -52,7 +52,7 @@ trait StoreBasedTopologySnapshotTest extends AsyncWordSpec with BaseTest with Ha ) ) lazy val party2participant1_2 = mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party2, None, PositiveInt.one, diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala index d1ff80b1d..e0d7db832 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/AuthorizationGraphTest.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.topology.processing import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.crypto.SigningPublicKey import com.digitalasset.canton.topology.transaction.{NamespaceDelegation, TopologyMapping} import com.digitalasset.canton.topology.{Namespace, TestingOwnerWithKeys} @@ -25,7 +26,7 @@ class AuthorizationGraphTest def mkGraph = new AuthorizationGraph(namespace, extraDebugInfo = true, loggerFactory) - def mkAuth( + def mkAdd( nsd: NamespaceDelegation, key: SigningPublicKey, ): AuthorizedTopologyTransaction[NamespaceDelegation] = { @@ -33,16 +34,27 @@ class AuthorizationGraphTest AuthorizedTopologyTransaction(tx) } + def mkRemove( + nsd: NamespaceDelegation, + key: SigningPublicKey, + ): AuthorizedTopologyTransaction[NamespaceDelegation] = { + val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two) + AuthorizedTopologyTransaction(tx) + } + def mkNs(namespace: Namespace, key: SigningPublicKey, isRootDelegation: Boolean) = NamespaceDelegation.tryCreate(namespace, key, isRootDelegation) - val nsk1k1 = mkAuth(mkNs(namespace, key1, isRootDelegation = true), key1) - val nsk2k1 = mkAuth(mkNs(namespace, key2, isRootDelegation = true), key1) - val nsk2k1p = mkAuth(mkNs(namespace, key2, isRootDelegation = true), key1) - val nsk3k2 = mkAuth(mkNs(namespace, key3, isRootDelegation = true), key2) + val nsk1k1 = mkAdd(mkNs(namespace, key1, isRootDelegation = true), key1) + val nsk1k1_remove = mkRemove(mkNs(namespace, key1, isRootDelegation = true), key1) + val nsk2k1 = mkAdd(mkNs(namespace, key2, isRootDelegation = true), key1) + val nsk2k1_remove = mkRemove(mkNs(namespace, key2, isRootDelegation = true), key1) + val nsk3k2 = mkAdd(mkNs(namespace, key3, isRootDelegation = true), key2) + val nsk3k2_remove = mkRemove(mkNs(namespace, key3, isRootDelegation = true), key2) val nsk1k2 = - mkAuth(mkNs(namespace, key1, isRootDelegation = true), key2) // cycle - val nsk3k1_nonRoot = mkAuth(mkNs(namespace, key3, isRootDelegation = false), 
key1) + mkAdd(mkNs(namespace, key1, isRootDelegation = true), key2) // cycle + val nsk3k1_nonRoot = mkAdd(mkNs(namespace, key3, isRootDelegation = false), key1) + val nsk3k1_nonRoot_remove = mkRemove(mkNs(namespace, key3, isRootDelegation = false), key1) def replaceSignature[T <: TopologyMapping]( authTx: AuthorizedTopologyTransaction[T], @@ -65,7 +77,7 @@ class AuthorizationGraphTest requireRoot: Boolean, valid: Boolean, ) = { - graph.areValidAuthorizationKeys(Set(key.fingerprint), requireRoot = requireRoot) shouldBe valid + graph.existsAuthorizedKeyIn(Set(key.fingerprint), requireRoot = requireRoot) shouldBe valid } "authorization graph" when { @@ -93,7 +105,7 @@ class AuthorizationGraphTest val graph = mkGraph graph.add(nsk1k1) graph.add(nsk2k1) - graph.remove(nsk2k1) + graph.remove(nsk2k1_remove) check(graph, key2, requireRoot = false, valid = false) check(graph, key1, requireRoot = false, valid = true) } @@ -104,10 +116,13 @@ class AuthorizationGraphTest graph.add(nsk3k2) check(graph, key2, requireRoot = false, valid = true) check(graph, key3, requireRoot = false, valid = true) - loggerFactory.assertLogs(graph.remove(nsk2k1), _.warningMessage should include("dangling")) + loggerFactory.assertLogs( + graph.remove(nsk2k1_remove), + _.warningMessage should include("dangling"), + ) check(graph, key2, requireRoot = false, valid = false) check(graph, key3, requireRoot = false, valid = false) - graph.add(nsk2k1p) + graph.add(nsk2k1) check(graph, key3, requireRoot = false, valid = true) } "support several chains" in { @@ -118,7 +133,7 @@ class AuthorizationGraphTest check(graph, key3, requireRoot = false, valid = true) graph.add(nsk3k1_nonRoot) check(graph, key3, requireRoot = false, valid = true) - graph.remove(nsk3k1_nonRoot) + graph.remove(nsk3k1_nonRoot_remove) check(graph, key3, requireRoot = false, valid = true) } @@ -136,7 +151,7 @@ class AuthorizationGraphTest graph.add(nsk1k1) graph.add(nsk2k1) graph.add(nsk3k2) - graph.remove(nsk1k1) + graph.remove(nsk1k1_remove) check(graph, key1, requireRoot = false, valid = false) check(graph, key2, requireRoot = false, valid = false) check(graph, key3, requireRoot = false, valid = false) @@ -159,17 +174,17 @@ class AuthorizationGraphTest // test that random key is not authorized check(graph, key3, requireRoot = false, valid = false) // remove first certificate - graph.remove(nsk2k1) + graph.remove(nsk2k1_remove) check(graph, key2, requireRoot = true, valid = false) // add other certificate (we don't remember removes, so we can do that in this test) - graph.add(nsk2k1p) + graph.add(nsk2k1) check(graph, key2, requireRoot = true, valid = true) } "reject delegations with a wrong namespace" in { val graph = mkGraph val fakeNs = Namespace(key8.fingerprint) - val nsk1k1 = mkAuth(mkNs(fakeNs, key1, isRootDelegation = true), key1) + val nsk1k1 = mkAdd(mkNs(fakeNs, key1, isRootDelegation = true), key1) loggerFactory.assertThrowsAndLogs[IllegalArgumentException]( graph.add(nsk1k1), _.errorMessage should include("internal error"), @@ -184,7 +199,7 @@ class AuthorizationGraphTest graph.add(nsk3k2) check(graph, key3, requireRoot = true, valid = true) - graph.remove(replaceSignature(nsk3k2, key1)) + graph.remove(replaceSignature(nsk3k2_remove, key1)) check(graph, key3, requireRoot = true, valid = false) } } @@ -202,10 +217,10 @@ class AuthorizationGraphTest graph.add(nsk1k1) graph.add(nsk2k1) check(graph, key2, requireRoot = false, valid = true) - val fakeRemove = replaceSignature(nsk2k1, key6) + val fakeRemove = replaceSignature(nsk2k1_remove, key6) 
graph.remove(fakeRemove) shouldBe false check(graph, key2, requireRoot = false, valid = true) - graph.remove(nsk2k1) + graph.remove(nsk2k1_remove) check(graph, key2, requireRoot = false, valid = false) } "prevent a non-root authorization to authorize a root authorization" in { @@ -213,7 +228,7 @@ class AuthorizationGraphTest graph.add(nsk1k1) graph.add(nsk3k1_nonRoot) check(graph, key3, requireRoot = false, valid = true) - val nsk4k3 = mkAuth(mkNs(namespace, key4, isRootDelegation = true), key3) + val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3) graph.add(nsk4k3) shouldBe false check(graph, key4, requireRoot = false, valid = false) } @@ -225,14 +240,14 @@ class AuthorizationGraphTest graph.add(nsk2k1) check(graph, key3, requireRoot = false, valid = true) check(graph, key2, requireRoot = true, valid = true) - graph.remove(replaceSignature(nsk2k1, key3)) shouldBe false + graph.remove(replaceSignature(nsk2k1_remove, key3)) shouldBe false check(graph, key2, requireRoot = true, valid = true) } "ensure once a delegation is revoked, all depending authorizations will become unauthorized" in { val graph = mkGraph - val nsk4k3 = mkAuth(mkNs(namespace, key4, isRootDelegation = true), key3) - val nsk5k2 = mkAuth(mkNs(namespace, key5, isRootDelegation = true), key3) + val nsk4k3 = mkAdd(mkNs(namespace, key4, isRootDelegation = true), key3) + val nsk5k2 = mkAdd(mkNs(namespace, key5, isRootDelegation = true), key3) graph.add(nsk1k1) graph.add(nsk2k1) graph.add(nsk3k2) @@ -241,7 +256,7 @@ class AuthorizationGraphTest Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = true)) loggerFactory.assertLogs( { - graph.remove(nsk2k1) + graph.remove(nsk2k1_remove) Seq(key3, key4, key5).foreach(check(graph, _, requireRoot = false, valid = false)) }, _.warningMessage should include("The following target keys"), diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala index 7f7ff32c2..18f0de3d8 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/DecentralizedNamespaceAuthorizationGraphTest.scala @@ -70,7 +70,7 @@ class DecentralizedNamespaceAuthorizationGraphTest } - def mkAuth( + def mkAdd( nsd: NamespaceDelegation, key: SigningPublicKey, ): AuthorizedTopologyTransaction[NamespaceDelegation] = { @@ -78,20 +78,30 @@ class DecentralizedNamespaceAuthorizationGraphTest AuthorizedTopologyTransaction(tx) } + def mkRemove( + nsd: NamespaceDelegation, + key: SigningPublicKey, + ): AuthorizedTopologyTransaction[NamespaceDelegation] = { + val tx = factory.mkRemove(nsd, NonEmpty(Set, key), PositiveInt.two) + AuthorizedTopologyTransaction(tx) + } + def mkNs(namespace: Namespace, key: SigningPublicKey, isRootDelegation: Boolean) = NamespaceDelegation.tryCreate(namespace, key, isRootDelegation) - val ns1k1k1 = mkAuth(mkNs(ns1, key1, isRootDelegation = true), key1) - val ns1k4k1 = mkAuth(mkNs(ns1, key4, isRootDelegation = true), key1) + val ns1k1k1 = mkAdd(mkNs(ns1, key1, isRootDelegation = true), key1) - val ns2k2k2 = mkAuth(mkNs(ns2, key2, isRootDelegation = true), key2) - val ns2k5k2 = mkAuth(mkNs(ns2, key5, isRootDelegation = true), key2) - val ns2k2k5 = mkAuth(mkNs(ns2, key5, isRootDelegation = true), 
key2) - val ns2k8k5 = mkAuth(mkNs(ns2, key8, isRootDelegation = true), key5) - val ns2k8k2_nonRoot = mkAuth(mkNs(ns2, key8, isRootDelegation = false), key2) + val ns2k2k2 = mkAdd(mkNs(ns2, key2, isRootDelegation = true), key2) + val ns2k2k2_remove = mkRemove(mkNs(ns2, key2, isRootDelegation = true), key2) + val ns2k5k2 = mkAdd(mkNs(ns2, key5, isRootDelegation = true), key2) + val ns2k5k2_remove = mkRemove(mkNs(ns2, key5, isRootDelegation = true), key2) + val ns2k2k5 = mkAdd(mkNs(ns2, key2, isRootDelegation = true), key5) + val ns2k8k5 = mkAdd(mkNs(ns2, key8, isRootDelegation = true), key5) + val ns2k8k5_remove = mkRemove(mkNs(ns2, key8, isRootDelegation = true), key5) + val ns2k8k2_nonRoot = mkAdd(mkNs(ns2, key8, isRootDelegation = false), key2) + val ns2k8k2_nonRoot_remove = mkRemove(mkNs(ns2, key8, isRootDelegation = false), key2) - val ns3k3k3 = mkAuth(mkNs(ns3, key3, isRootDelegation = true), key3) - val ns3k6k3 = mkAuth(mkNs(ns3, key6, isRootDelegation = true), key3) + val ns3k3k3 = mkAdd(mkNs(ns3, key3, isRootDelegation = true), key3) def replaceSignature[T <: TopologyMapping]( authTx: AuthorizedTopologyTransaction[T], @@ -114,7 +124,7 @@ class DecentralizedNamespaceAuthorizationGraphTest requireRoot: Boolean, valid: Boolean, )(keys: SigningPublicKey*) = { - graph.areValidAuthorizationKeys( + graph.existsAuthorizedKeyIn( keys.map(_.fingerprint).toSet, requireRoot = requireRoot, ) shouldBe valid @@ -164,7 +174,7 @@ class DecentralizedNamespaceAuthorizationGraphTest graph.addAuth(ns2k2k2) graph.addAuth(ns3k3k3) - graph.removeAuth(ns2k2k2) + graph.removeAuth(ns2k2k2_remove) check(graph, requireRoot = false, valid = false)(key1, key2) check(graph, requireRoot = false, valid = true)(key1, key3) } @@ -180,7 +190,7 @@ class DecentralizedNamespaceAuthorizationGraphTest check(graph, requireRoot = false, valid = true)(key1, key5) check(graph, requireRoot = false, valid = true)(key1, key8) loggerFactory.assertLogs( - graph.removeAuth(ns2k5k2), + graph.removeAuth(ns2k5k2_remove), _.warningMessage should include("dangling"), ) check(graph, requireRoot = false, valid = false)(key1, key5) @@ -200,7 +210,7 @@ class DecentralizedNamespaceAuthorizationGraphTest check(graph, requireRoot = false, valid = true)(key1, key8) graph.addAuth(ns2k8k2_nonRoot) check(graph, requireRoot = false, valid = true)(key1, key8) - graph.removeAuth(ns2k8k2_nonRoot) + graph.removeAuth(ns2k8k2_nonRoot_remove) check(graph, requireRoot = false, valid = true)(key1, key8) } @@ -222,7 +232,7 @@ class DecentralizedNamespaceAuthorizationGraphTest graph.addAuth(ns2k5k2) graph.addAuth(ns2k8k5) - graph.removeAuth(ns2k2k2) + graph.removeAuth(ns2k2k2_remove) check(graph, requireRoot = false, valid = false)(key1, key2) check(graph, requireRoot = false, valid = false)(key1, key5) check(graph, requireRoot = false, valid = false)(key1, key8) @@ -247,7 +257,7 @@ class DecentralizedNamespaceAuthorizationGraphTest // test that random key is not authorized check(graph, requireRoot = false, valid = false)(key1, key3) // remove first certificate - graph.removeAuth(ns2k5k2) + graph.removeAuth(ns2k5k2_remove) check(graph, requireRoot = true, valid = false)(key1, key5) // add other certificate (we don't remember removes, so we can do that in this test) graph.addAuth(ns2k5k2) @@ -264,7 +274,7 @@ class DecentralizedNamespaceAuthorizationGraphTest graph.addAuth(ns2k8k5) check(graph, requireRoot = true, valid = true)(key1, key8) - graph.removeAuth(replaceSignature(ns2k8k5, key2)) + graph.removeAuth(replaceSignature(ns2k8k5_remove, key2)) check(graph, 
requireRoot = true, valid = false)(key1, key8) } } diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala index b2437ce24..317c680fd 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/IncomingTopologyTransactionAuthorizationValidatorTest.scala @@ -5,9 +5,11 @@ package com.digitalasset.canton.topology.processing import cats.Apply import cats.instances.list.* +import cats.syntax.foldable.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.SigningPublicKey +import com.digitalasset.canton.crypto.SignatureCheckError.InvalidSignature +import com.digitalasset.canton.crypto.{Signature, SigningPublicKey} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore @@ -21,10 +23,13 @@ import com.digitalasset.canton.topology.store.{ TopologyTransactionRejection, ValidatedTopologyTransaction, } +import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction +import com.digitalasset.canton.topology.transaction.TopologyMapping.MappingHash import com.digitalasset.canton.topology.transaction.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil import com.digitalasset.canton.{BaseTest, HasExecutionContext, ProtocolVersionChecksAsyncWordSpec} +import com.google.protobuf.ByteString import org.scalatest.wordspec.AsyncWordSpec class IncomingTopologyTransactionAuthorizationValidatorTest @@ -69,12 +74,35 @@ class IncomingTopologyTransactionAuthorizationValidatorTest succeed } + def validate( + validator: IncomingTopologyTransactionAuthorizationValidator, + timestamp: CantonTimestamp, + toValidate: Seq[GenericSignedTopologyTransaction], + inStore: Map[MappingHash, GenericSignedTopologyTransaction], + expectFullAuthorization: Boolean, + )(implicit traceContext: TraceContext) = { + MonadUtil + .sequentialTraverse(toValidate)(tx => + validator.validateAndUpdateHeadAuthState( + timestamp, + tx, + inStore.get(tx.mapping.uniqueKey), + expectFullAuthorization, + ) + ) + .map { results => + val (aggregations, transactions) = results.unzip + (aggregations.combineAll, transactions) + } + } + "receiving transactions with signatures" should { "succeed to add if the signature is valid" in { val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, ns1k2_k1), Map.empty, @@ -89,7 +117,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest import Factory.* val invalid = ns1k2_k1.copy(signatures = ns1k1_k1.signatures) for { - (_, validatedTopologyTransactions) <- validator.validateAndUpdateHeadAuthState( + (_, validatedTopologyTransactions) <- validate( + validator, ts(0), List(ns1k1_k1, invalid), Map.empty, @@ -116,7 +145,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val okmS1k7_k1_missing_k7 = okmS1k7_k1.removeSignatures(Set(SigningKeys.key7.fingerprint)).value for { - (_, validatedTopologyTransactions) <- 
validator.validateAndUpdateHeadAuthState( + (_, validatedTopologyTransactions) <- validate( + validator, ts(0), List(ns1k1_k1, okmS1k7_k1_missing_k7), Map.empty, @@ -133,37 +163,41 @@ class IncomingTopologyTransactionAuthorizationValidatorTest } } - // TODO(#12390) resuscitate -// "reject if the transaction is for the wrong domain" in { -// val validator = mk() -// import Factory.* -// val wrongDomain = DomainId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap)) -// val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap)) -// val wrong = mkAdd( -// ParticipantState( -// RequestSide.Both, -// wrongDomain, -// pid, -// ParticipantPermission.Submission, -// TrustLevel.Ordinary, -// ), -// Factory.SigningKeys.key1, -// ) -// for { -// res <- validator.validateAndUpdateHeadAuthState(ts(0), List(ns1k1_k1, wrong)) -// } yield { -// check( -// res._2, -// Seq( -// None, -// Some({ -// case TopologyTransactionRejection.WrongDomain(_) => true -// case _ => false -// }), -// ), -// ) -// } -// } + "reject if the transaction is for the wrong domain" in { + val validator = mk() + import Factory.* + val wrongDomain = DomainId(UniqueIdentifier.tryCreate("wrong", ns1.fingerprint.unwrap)) + val pid = ParticipantId(UniqueIdentifier.tryCreate("correct", ns1.fingerprint.unwrap)) + val wrong = mkAdd( + DomainTrustCertificate( + pid, + wrongDomain, + false, + Seq.empty, + ), + Factory.SigningKeys.key1, + ) + for { + res <- validate( + validator, + ts(0), + List(ns1k1_k1, wrong), + Map.empty, + expectFullAuthorization = false, + ) + } yield { + check( + res._2, + Seq( + None, + Some({ + case TopologyTransactionRejection.WrongDomain(_) => true + case _ => false + }), + ), + ) + } + } } "observing namespace delegations" should { @@ -171,7 +205,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, ns1k2_k1, ns1k3_k2), Map.empty, @@ -181,11 +216,46 @@ class IncomingTopologyTransactionAuthorizationValidatorTest check(res._2, Seq(None, None, None)) } } + "fail if the signature of a root certificate is not valid" in { + val validator = mk() + import Factory.* + + val sig_k1_emptySignature = Signature + .fromProtoV30(ns1k1_k1.signatures.head1.toProtoV30.copy(signature = ByteString.empty())) + .value + val ns1k1_k1WithEmptySignature = + ns1k1_k1.copy(signatures = NonEmpty(Set, sig_k1_emptySignature)) + + for { + res <- validate( + validator, + ts(0), + List(ns1k1_k1WithEmptySignature, ns1k2_k1), + Map.empty, + expectFullAuthorization = true, + ) + } yield { + check( + res._2, + Seq( + Some({ + case TopologyTransactionRejection.SignatureCheckFailed( + InvalidSignature(`sig_k1_emptySignature`, _, _) + ) => + true + case _ => false + }), + Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key1.fingerprint))), + ), + ) + } + } "fail if transaction is not properly authorized" in { val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, ns6k3_k6, ns1k3_k2, ns1k2_k1, ns1k3_k2), Map.empty, @@ -217,7 +287,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest removeTxs = Set.empty, additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(ns1k2_k1, ns1k3_k2), Map.empty, @@ -232,7 +303,8 @@ class 
IncomingTopologyTransactionAuthorizationValidatorTest val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(ns1k1_k1, ns1k3_k2, id1ak4_k2, ns1k2_k1, ns6k3_k6, id1ak4_k1), Map.empty, @@ -261,7 +333,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, id1ak4_k1, ns1k2_k1, id1ak4_k2), Map.empty, @@ -275,7 +348,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(id1ak4_k1, ns1k1_k1, id1ak4_k1, id6k4_k1), Map.empty, @@ -301,7 +375,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, ns1k2_k1, okm1ak5k1E_k2, p1p1B_k2, id1ak4_k1, ns6k6_k6, p1p6_k2k6), Map.empty, @@ -315,21 +390,41 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val validator = mk() import Factory.* for { - res <- validator.validateAndUpdateHeadAuthState( + resultExpectFullAuthorization <- validate( + validator, ts(0), List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2), Map.empty, expectFullAuthorization = true, ) + // also check that insufficiently authorized non-proposals get rejected with expectFullAuthorization + resultDontExpectFullAuthorization <- validate( + validator, + ts(0), + List(ns1k1_k1, okm1ak5k1E_k2, p1p1B_k2), + Map.empty, + expectFullAuthorization = false, + ) + } yield { check( - res._2, + resultExpectFullAuthorization._2, Seq( None, Some(_ == NotAuthorized), Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), ), ) + + check( + resultDontExpectFullAuthorization._2, + Seq( + None, + Some(_ == NotAuthorized), + Some(_ == NoDelegationFoundForKeys(Set(SigningKeys.key2.fingerprint))), + ), + ) + } } "succeed with loading existing identifier delegations" in { @@ -345,7 +440,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest removeTxs = Set.empty, additions = List(ns1k1_k1, ns6k6_k6, id1ak4_k1).map(ValidatedTopologyTransaction(_)), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(ns1k2_k1, p1p6_k2k6, p1p1B_k2), Map.empty, @@ -364,7 +460,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse) val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse) for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, ns1k2_k1, id1ak4_k1, Rns1k2_k1, Rid1ak4_k1), Map.empty, @@ -381,7 +478,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest val Rns1k2_k1 = mkTrans(ns1k2_k1.transaction.reverse) val Rid1ak4_k1 = mkTrans(id1ak4_k1.transaction.reverse) for { - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(0), List(ns1k1_k1, ns1k2_k1, id1ak4_k1, Rns1k2_k1, Rid1ak4_k1, okm1ak5k1E_k2, p1p6_k2), Map.empty, @@ -419,7 +517,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest removeTxs = Set.empty, additions = List(ns6k6_k6).map(ValidatedTopologyTransaction(_)), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(ns1k1_k1, okm1bk5k1E_k1, p1p6_k6), Map.empty, @@ -444,7 +543,8 @@ class 
IncomingTopologyTransactionAuthorizationValidatorTest removeTxs = Set.empty, additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(Rns1k1_k1, okm1bk5k1E_k1), Map(Rns1k1_k1.mapping.uniqueKey -> ns1k1_k1), @@ -473,13 +573,15 @@ class IncomingTopologyTransactionAuthorizationValidatorTest removeTxs = Set.empty, additions = List(ns1k1_k1).map(ValidatedTopologyTransaction(_)), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(id1ak4_k1), Map.empty, expectFullAuthorization = true, ) - res2 <- validator.validateAndUpdateHeadAuthState( + res2 <- validate( + validator, ts(2), List(Rid1ak4_k1), Map.empty, @@ -511,7 +613,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest additions = List(ns1k1_k1, ns1k2_k1, id1ak4_k2, ns6k6_k6).map(ValidatedTopologyTransaction(_)), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(p1p6_k2k6, Rns1k2_k1, id6ak7_k6, p1p6_k2), Map( @@ -543,29 +646,44 @@ class IncomingTopologyTransactionAuthorizationValidatorTest import Factory.* val pid2 = ParticipantId(UniqueIdentifier.tryCreate("participant2", ns2)) - val participant2HostsParty1 = mkAddMultiKey( - PartyToParticipant( + val participants_1_2_6_HostParty1 = mkAddMultiKey( + PartyToParticipant.tryCreate( party1b, // lives in the namespace of p1, corresponding to `SigningKeys.key1` None, threshold = PositiveInt.two, Seq( HostingParticipant(participant1, ParticipantPermission.Submission), HostingParticipant(pid2, ParticipantPermission.Submission), + HostingParticipant(participant6, ParticipantPermission.Submission), ), groupAddressing = false, ), // both the party's owner and the participant sign - NonEmpty(Set, SigningKeys.key1, SigningKeys.key2), + NonEmpty(Set, SigningKeys.key1, SigningKeys.key2, SigningKeys.key6), serial = PositiveInt.one, ) - val unhostingMapping = PartyToParticipant( + val unhostingMapping = PartyToParticipant.tryCreate( party1b, None, threshold = PositiveInt.two, - Seq(HostingParticipant(participant1, ParticipantPermission.Submission)), + Seq( + HostingParticipant(participant1, ParticipantPermission.Submission), + HostingParticipant(participant6, ParticipantPermission.Submission), + ), + groupAddressing = false, + ) + val unhostingMappingAndThresholdChange = PartyToParticipant.tryCreate( + party1b, + None, + threshold = PositiveInt.one, + Seq( + HostingParticipant(participant1, ParticipantPermission.Submission), + HostingParticipant(participant6, ParticipantPermission.Submission), + ), groupAddressing = false, ) + val participant2RemovesItselfUnilaterally = mkAdd( unhostingMapping, // only the unhosting participant signs @@ -580,53 +698,54 @@ class IncomingTopologyTransactionAuthorizationValidatorTest serial = PositiveInt.two, ) - val ptpMappingHash = participant2HostsParty1.mapping.uniqueKey - import monocle.syntax.all.* + val ptpMappingHash = participants_1_2_6_HostParty1.mapping.uniqueKey for { _ <- store.update( SequencedTime(ts(0)), EffectiveTime(ts(0)), removeMapping = Map.empty, removeTxs = Set.empty, - additions = List(ns1k1_k1, ns2k2_k2).map( + additions = List(ns1k1_k1, ns2k2_k2, ns6k6_k6).map( ValidatedTopologyTransaction(_) ), ) - hostingResult <- validator.validateAndUpdateHeadAuthState( + hostingResult <- validate( + validator, ts(1), - List(participant2HostsParty1), - transactionsInStore = Map.empty, + List(participants_1_2_6_HostParty1), + inStore = Map.empty, 
expectFullAuthorization = false, ) // unilateral unhosting by participant2 only signed by the participant - unhostingResult <- validator.validateAndUpdateHeadAuthState( + unhostingResult <- validate( + validator, ts(2), List(participant2RemovesItselfUnilaterally), - transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1), + inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1), expectFullAuthorization = false, ) // it is still allowed to have a mix of signatures for unhosting - unhostingMixedResult <- validator.validateAndUpdateHeadAuthState( + unhostingMixedResult <- validate( + validator, ts(2), List(participant2RemovedFullyAuthorized), - transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1), + inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1), expectFullAuthorization = false, ) // the participant being removed may not sign if anything else changes - unhostingAndThresholdChangeResult <- validator.validateAndUpdateHeadAuthState( + unhostingAndThresholdChangeResult <- validate( + validator, ts(2), List( mkAddMultiKey( - unhostingMapping - .focus(_.threshold) - .replace(PositiveInt.one), + unhostingMappingAndThresholdChange, NonEmpty(Set, SigningKeys.key2), ) ), - transactionsInStore = Map(ptpMappingHash -> participant2HostsParty1), + inStore = Map(ptpMappingHash -> participants_1_2_6_HostParty1), expectFullAuthorization = false, ) } yield { @@ -657,7 +776,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ValidatedTopologyTransaction(_) ), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(1), List(dns2), decentralizedNamespaceWithMultipleOwnerThreshold @@ -695,7 +815,8 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ValidatedTopologyTransaction(_) ), ) - res <- validator.validateAndUpdateHeadAuthState( + res <- validate( + validator, ts(2), // Analogously to how the TopologyStateProcessor merges the signatures of proposals // with the same serial, combine the signature of the previous proposal to the current proposal. @@ -711,9 +832,93 @@ class IncomingTopologyTransactionAuthorizationValidatorTest check(res._2, Seq(None)) } } + + "remove from cache for TopologyChangeOp.REMOVAL" in { + val store = + new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) + val validator = mk(store) + import Factory.* + for { + // 1. validate and store the decentralized namespace owners root certificates + resultAddOwners <- validate( + validator, + ts(0), + decentralizedNamespaceOwners, + Map.empty, + expectFullAuthorization = true, + ) + _ = resultAddOwners._2.foreach(_.rejectionReason shouldBe None) + _ <- store.update( + SequencedTime(ts(0)), + EffectiveTime(ts(0)), + removeMapping = Map.empty, + removeTxs = Set.empty, + additions = resultAddOwners._2, + ) + + // 2. validate and store the decentralized namespace definition + // this puts the DND authorization graph into the cache + resultAddDND <- validate( + validator, + ts(1), + List(dns1), + Map.empty, + expectFullAuthorization = true, + ) + _ = resultAddDND._2.foreach(_.rejectionReason shouldBe None) + _ <- store.update( + SequencedTime(ts(1)), + EffectiveTime(ts(1)), + removeMapping = Map.empty, + removeTxs = Set.empty, + additions = resultAddDND._2, + ) + + // 3. 
now process the removal of the decentralized namespace definition + // this should remove the DND authorization graph from the cache + resRemoveDND <- validate( + validator, + ts(2), + List(dns1Removal), + Map(dns1.mapping.uniqueKey -> dns1), + expectFullAuthorization = true, + ) + _ = resRemoveDND._2.foreach(_.rejectionReason shouldBe None) + _ <- store.update( + SequencedTime(ts(2)), + EffectiveTime(ts(2)), + removeMapping = Map(dns1Removal.mapping.uniqueKey -> dns1Removal.serial), + removeTxs = Set.empty, + additions = resRemoveDND._2, + ) + + // 4. Now to the actual test: try to authorize something for the decentralized namespace. + // this should be rejected because the namespace is not valid anymore, and the + // authorization cache has been properly cleaned up. + resultUnauthorizedIDD <- validate( + validator, + ts(3), + List(dns1Idd), + Map.empty, + expectFullAuthorization = true, + ) + } yield { + check( + resultUnauthorizedIDD._2, + Seq( + Some( + _ == NoDelegationFoundForKeys( + Set(SigningKeys.key1, SigningKeys.key8, SigningKeys.key9).map(_.fingerprint) + ) + ) + ), + ) + } + + } } - def checkProposalFlatAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = { + def checkProposalFlagAfterValidation(validationIsFinal: Boolean, expectProposal: Boolean) = { val store = new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) val validator = mk(store, validationIsFinal) @@ -751,18 +956,18 @@ class IncomingTopologyTransactionAuthorizationValidatorTest ), BaseTest.testedProtocolVersion, ) - result_packageVetting <- validator - .validateAndUpdateHeadAuthState( - ts(1), - transactionsToValidate = List( - // Setting isProposal=true despite having enough keys. - // This simulates processing a proposal with the signature of a node, - // that got merged with another proposal already in the store. - mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true) - ), - transactionsInStore = Map.empty, - expectFullAuthorization = false, - ) + result_packageVetting <- validate( + validator, + ts(1), + toValidate = List( + // Setting isProposal=true despite having enough keys. + // This simulates processing a proposal with the signature of a node, + // that got merged with another proposal already in the store. + mkTrans(pkgTx, signingKeys = NonEmpty(Set, key1, key8), isProposal = true) + ), + inStore = Map.empty, + expectFullAuthorization = false, + ) } yield { val validatedPkgTx = result_packageVetting._2.loneElement @@ -775,11 +980,11 @@ } "change the proposal status when the validation is final" in { - checkProposalFlatAfterValidation(validationIsFinal = true, expectProposal = false) + checkProposalFlagAfterValidation(validationIsFinal = true, expectProposal = false) } "not change the proposal status when the validation is not final" in { - checkProposalFlatAfterValidation(validationIsFinal = false, expectProposal = true) + checkProposalFlagAfterValidation(validationIsFinal = false, expectProposal = true) } "remove superfluous signatures" in { val store = new InMemoryTopologyStore(TopologyStoreId.AuthorizedStore, loggerFactory, timeouts) val validator = mk(store) import Factory.* import SigningKeys.{ec as _, *} val dns = decentralizedNamespaceWithMultipleOwnerThreshold.last val decentralizedNamespaceWithThreeOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9, dns) for { _ <- store.update( SequencedTime(ts(0)), EffectiveTime(ts(0)), removeMapping = Map.empty, removeTxs = Set.empty, additions = decentralizedNamespaceWithThreeOwners.map( ValidatedTopologyTransaction(_) ), ) pkgTx = TopologyTransaction( TopologyChangeOp.Replace, serial = PositiveInt.one, VettedPackages( ParticipantId(Identifier.tryCreate("consortium-participiant"), dns.mapping.namespace), None, Seq.empty, ), BaseTest.testedProtocolVersion, ) - resultPackageVetting <- validator - .validateAndUpdateHeadAuthState( - ts(1), - transactionsToValidate = List( - // Signing this transaction also with key9 simulates that ns9 was part of the - // decentralized namespace before and was eligible for signing the transaction.
- // After this validation, we expect the signature of key9 to be removed - mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true) - ), - transactionsInStore = Map.empty, - expectFullAuthorization = false, - ) + resultPackageVetting <- validate( + validator, + ts(1), + toValidate = List( + // Signing this transaction also with key9 simulates that ns9 was part of the + // decentralized namespace before and was eligible for signing the transaction. + // After this validation, we expect the signature of key9 to be removed + mkTrans(pkgTx, signingKeys = NonEmpty(Set, key9, key1, key8), isProposal = true) + ), + inStore = Map.empty, + expectFullAuthorization = false, + ) // if there are only superfluous signatures, reject the transaction - resultOnlySuperfluousSignatures <- validator.validateAndUpdateHeadAuthState( + resultOnlySuperfluousSignatures <- validate( + validator, ts(2), - transactionsToValidate = List( + toValidate = List( mkTrans(pkgTx, signingKeys = NonEmpty(Set, key3, key5), isProposal = true) ), - transactionsInStore = Map.empty, + inStore = Map.empty, expectFullAuthorization = false, ) @@ -893,19 +1099,19 @@ class IncomingTopologyTransactionAuthorizationValidatorTest expectFullAuthorization: Boolean, signingKeys: SigningPublicKey* ) = TraceContext.withNewTraceContext { freshTraceContext => - validator - .validateAndUpdateHeadAuthState( - ts(1), - transactionsToValidate = List( - mkTrans( - pkgTx, - isProposal = isProposal, - signingKeys = NonEmpty.from(signingKeys.toSet).value, - ) - ), - transactionsInStore = Map.empty, - expectFullAuthorization = expectFullAuthorization, - )(freshTraceContext) + validate( + validator, + ts(1), + toValidate = List( + mkTrans( + pkgTx, + isProposal = isProposal, + signingKeys = NonEmpty.from(signingKeys.toSet).value, + ) + ), + inStore = Map.empty, + expectFullAuthorization = expectFullAuthorization, + )(freshTraceContext) .map(_._2.loneElement) } diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala index 379b456f3..25a009518 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/processing/TopologyTransactionTestFactory.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.topology.processing import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.PositiveInt -import com.digitalasset.canton.crypto.{Fingerprint, SigningPublicKey} +import com.digitalasset.canton.crypto.SigningPublicKey import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.protocol.TestDomainParameters import com.digitalasset.canton.time.NonNegativeFiniteDuration @@ -98,7 +98,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: val p1p1B_k2 = mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1b, None, threshold = PositiveInt.one, @@ -109,7 +109,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: ) val p1p6_k2 = mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1b, None, threshold = PositiveInt.one, @@ -120,20 +120,20 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: isProposal = true, ) val p1p6_k6 = - mkAdd( - PartyToParticipant( + 
mkAddMultiKey( + PartyToParticipant.tryCreate( party1b, None, threshold = PositiveInt.one, Seq(HostingParticipant(participant6, ParticipantPermission.Submission)), groupAddressing = false, ), - key6, + NonEmpty(Set, key1, key6), isProposal = true, ) val p1p6_k2k6 = mkAddMultiKey( - PartyToParticipant( + PartyToParticipant.tryCreate( party1b, None, threshold = PositiveInt.one, @@ -145,7 +145,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: val p1p6B_k3 = mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1b, Some(domainId1), threshold = PositiveInt.one, @@ -192,6 +192,15 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: NonEmpty(Set, key1, key8, key9), serial = PositiveInt.one, ) + val dns1Removal = mkRemove( + dns1.mapping, + NonEmpty(Set, key1, key8, key9), + serial = PositiveInt.two, + ) + val dns1Idd = mkAddMultiKey( + IdentifierDelegation(UniqueIdentifier.tryCreate("test", dns1.mapping.namespace), key4), + NonEmpty(Set, key1, key8, key9), + ) val dns2 = mkAdd( DecentralizedNamespaceDefinition .create(ns7, PositiveInt.one, NonEmpty(Set, ns1)) @@ -214,15 +223,19 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: serial = PositiveInt.two, isProposal = true, ) + val decentralizedNamespaceOwners = List(ns1k1_k1, ns8k8_k8, ns9k9_k9) val decentralizedNamespaceWithMultipleOwnerThreshold = List(ns1k1_k1, ns8k8_k8, ns9k9_k9, ns7k7_k7, dns1) + private val dndOwners = + NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)) + private val dndNamespace = DecentralizedNamespaceDefinition.computeNamespace(dndOwners) val dnd_proposal_k1 = mkAdd( DecentralizedNamespaceDefinition .create( - Namespace(Fingerprint.tryCreate("dnd-namespace")), + dndNamespace, PositiveInt.two, - NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)), + dndOwners, ) .fold(sys.error, identity), signingKey = key1, @@ -231,7 +244,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: val dnd_proposal_k2 = mkAdd( DecentralizedNamespaceDefinition .create( - Namespace(Fingerprint.tryCreate("dnd-namespace")), + dndNamespace, PositiveInt.two, NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)), ) @@ -242,7 +255,7 @@ class TopologyTransactionTestFactory(loggerFactory: NamedLoggerFactory, initEc: val dnd_proposal_k3 = mkAdd( DecentralizedNamespaceDefinition .create( - Namespace(Fingerprint.tryCreate("dnd-namespace")), + dndNamespace, PositiveInt.two, NonEmpty(Set, key1.fingerprint, key2.fingerprint, key3.fingerprint).map(Namespace(_)), ) diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala index 6d2b71ecb..33f91f4ba 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreTestData.scala @@ -89,7 +89,7 @@ class TopologyStoreTestData( serial = PositiveInt.tryCreate(1), ) val tx3_PTP_Proposal = makeSignedTx( - PartyToParticipant( + PartyToParticipant.tryCreate( partyId = fredOfCanton, domainId = None, threshold = PositiveInt.one, @@ -116,7 +116,7 @@ class TopologyStoreTestData( serial = PositiveInt.tryCreate(2), ) val tx5_PTP = makeSignedTx( - PartyToParticipant( + PartyToParticipant.tryCreate( partyId 
= fredOfCanton, domainId = None, threshold = PositiveInt.one, diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala index d473bb727..660fbc30c 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/GeneratorsTransaction.scala @@ -12,6 +12,8 @@ import com.digitalasset.canton.topology.{ GeneratorsTopology, MediatorId, Namespace, + ParticipantId, + PartyId, SequencerId, } import com.digitalasset.canton.version.ProtocolVersion @@ -20,6 +22,8 @@ import magnolify.scalacheck.auto.* import org.scalacheck.{Arbitrary, Gen} import org.scalatest.EitherValues.* +import scala.math.Ordering.Implicits.* + final class GeneratorsTransaction( protocolVersion: ProtocolVersion, generatorsProtocol: GeneratorsProtocol, @@ -49,6 +53,18 @@ final class GeneratorsTransaction( Arbitrary(Generators.nonEmptySetGen[PublicKey].map(_.toSeq)) implicit val topologyTransactionMappingsArb: Arbitrary[NonEmpty[Seq[TopologyMapping]]] = Arbitrary(Generators.nonEmptySetGen[TopologyMapping].map(_.toSeq)) + implicit val topologyTransactionPartyIdsArb: Arbitrary[NonEmpty[Seq[PartyId]]] = + Arbitrary(Generators.nonEmptySetGen[PartyId].map(_.toSeq)) + implicit val topologyTransactionHostingParticipantsArb + : Arbitrary[NonEmpty[Seq[HostingParticipant]]] = + Arbitrary(Generators.nonEmptySetGen[HostingParticipant].map(_.toSeq)) + + implicit val hostingParticipantArb: Arbitrary[HostingParticipant] = Arbitrary( + for { + pid <- Arbitrary.arbitrary[ParticipantId] + permission <- Arbitrary.arbitrary[ParticipantPermission] + } yield HostingParticipant(pid, permission) + ) implicit val topologyMappingArb: Arbitrary[TopologyMapping] = genArbitrary @@ -89,6 +105,31 @@ final class GeneratorsTransaction( } yield PurgeTopologyTransaction.create(domain, mappings).value ) + implicit val authorityOfTopologyTransactionArb: Arbitrary[AuthorityOf] = Arbitrary( + for { + partyId <- Arbitrary.arbitrary[PartyId] + domain <- Arbitrary.arbitrary[Option[DomainId]] + authorizers <- Arbitrary.arbitrary[NonEmpty[Seq[PartyId]]] + // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint + threshold <- Gen.choose(1, authorizers.size).map(PositiveInt.tryCreate) + } yield AuthorityOf.create(partyId, domain, threshold, authorizers).value + ) + + implicit val partyToParticipantTopologyTransactionArb: Arbitrary[PartyToParticipant] = Arbitrary( + for { + partyId <- Arbitrary.arbitrary[PartyId] + domain <- Arbitrary.arbitrary[Option[DomainId]] + participants <- Arbitrary.arbitrary[NonEmpty[Seq[HostingParticipant]]] + // Not using Arbitrary.arbitrary[PositiveInt] for threshold to honor constraint + threshold <- Gen + .choose(1, participants.count(_.permission >= ParticipantPermission.Confirmation).max(1)) + .map(PositiveInt.tryCreate) + groupAddressing <- Arbitrary.arbitrary[Boolean] + } yield PartyToParticipant + .create(partyId, domain, threshold, participants, groupAddressing) + .value + ) + implicit val sequencerDomainStateArb: Arbitrary[SequencerDomainState] = Arbitrary( for { domain <- Arbitrary.arbitrary[DomainId] diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala 
b/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala index 0dd8eecf7..297a955f5 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/transaction/ValidatingTopologyMappingChecksTest.scala @@ -5,12 +5,16 @@ package com.digitalasset.canton.topology.transaction import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.Fingerprint import com.digitalasset.canton.protocol.{DynamicDomainParameters, OnboardingRestriction} import com.digitalasset.canton.topology.DefaultTestIdentities.{mediatorId, sequencerId} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore -import com.digitalasset.canton.topology.store.TopologyTransactionRejection.InvalidTopologyMapping +import com.digitalasset.canton.topology.store.TopologyTransactionRejection.{ + InvalidTopologyMapping, + PartyExceedsHostingLimit, +} import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStore import com.digitalasset.canton.topology.store.{ StoredTopologyTransaction, @@ -97,35 +101,64 @@ class ValidatingTopologyMappingChecksTest } } - "validating PartyToParticipant" should { - - "reject an invalid threshold" in { - val (checks, _) = mk() + "validating DecentralizedNamespaceDefinition" should { + "reject namespaces not derived from their owners' namespaces" in { + val (checks, store) = mk() + val keys = NonEmpty.mk( + Set, + factory.SigningKeys.key1, + factory.SigningKeys.key2, + factory.SigningKeys.key3, + ) + val (namespaces, rootCerts) = + keys.map { key => + val namespace = Namespace(key.fingerprint) + namespace -> factory.mkAdd( + NamespaceDelegation.tryCreate( + namespace, + key, + isRootDelegation = true, + ), + signingKey = key, + ) + }.unzip - val failureCases = Seq[(PositiveInt, Seq[HostingParticipant])]( - PositiveInt.two -> Seq(participant1 -> Observation, participant2 -> Confirmation), - PositiveInt.two -> Seq(participant1 -> Observation, participant2 -> Submission), - PositiveInt.two -> Seq(participant1 -> Submission), - PositiveInt.one -> Seq(participant1 -> Observation), - ) + addToStore(store, rootCerts.toSeq*) - failureCases.foreach { case (threshold, participants) => - val ptp = factory.mkAdd( - PartyToParticipant( - party1, - None, - threshold, - participants, - groupAddressing = false, + val dns = factory.mkAddMultiKey( + DecentralizedNamespaceDefinition + .create( + Namespace(Fingerprint.tryCreate("bogusNamespace")), + PositiveInt.one, + NonEmpty.from(namespaces).value.toSet, ) - ) + .value, + signingKeys = keys, + // using serial=2 here to test that we don't special case serial=1 + serial = PositiveInt.two, + ) - checkTransaction(checks, ptp) should matchPattern { - case Left(TopologyTransactionRejection.ThresholdTooHigh(`threshold`.value, _)) => - } + checkTransaction(checks, dns, None) should matchPattern { + case Left(TopologyTransactionRejection.InvalidTopologyMapping(err)) + if err.contains("not derived from the owners") => } } + // TODO(#19716) how does one produce a key with a specific hash? by using symbolic crypto? 
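+ // A possible approach (unverified sketch): the topology test factories build their keys with symbolic crypto, + // which derives fingerprints from plain string ids, so a key with a chosen fingerprint could presumably be + // minted via something like SymbolicCrypto.signingPublicKey("ns1") and used for the pre-existing root certificate.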
+ "reject if a root certificate with the same namespace already exists" ignore { + fail("TODO(#19716)") + } + } + + "validating NamespaceDelegation" should { + // TODO(#19715) how does one produce a key with a specific hash? by using symbolic crypto? + "reject a root certificate if a decentralized namespace with the same namespace already exists" ignore { + fail("TODO(#19715)") + } + } + + "validating PartyToParticipant" should { + "reject when participants don't have a DTC" in { val (checks, store) = mk() addToStore(store, p2_dtc) @@ -134,7 +167,7 @@ class ValidatingTopologyMappingChecksTest failureCases.foreach { participants => val ptp = factory.mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1, None, PositiveInt.one, @@ -163,7 +196,7 @@ class ValidatingTopologyMappingChecksTest missingKeyCases.foreach { participant => val ptp = factory.mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1, None, PositiveInt.one, @@ -177,6 +210,40 @@ class ValidatingTopologyMappingChecksTest } } + "reject when the party exceeds the explicitly issued PartyHostingLimits" in { + def mkPTP(numParticipants: Int) = { + val hostingParticipants = Seq[HostingParticipant]( + participant1 -> Observation, + participant2 -> Submission, + participant3 -> Submission, + ) + factory.mkAdd( + PartyToParticipant.tryCreate( + partyId = party1, + domainId = None, + threshold = PositiveInt.one, + participants = hostingParticipants.take(numParticipants), + groupAddressing = false, + ) + ) + } + + val (checks, store) = mk() + val limits = factory.mkAdd(PartyHostingLimits(domainId, party1, 2)) + addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc, limits) + + // 2 participants are at the limit + val twoParticipants = mkPTP(numParticipants = 2) + checkTransaction(checks, twoParticipants) shouldBe Right(()) + + // 3 participants exceed the limit imposed by the domain + val threeParticipants = mkPTP(numParticipants = 3) + checkTransaction(checks, threeParticipants) shouldBe Left( + PartyExceedsHostingLimit(party1, 2, 3) + ) + + } + "report no errors for valid mappings" in { val (checks, store) = mk() addToStore(store, p1_otk, p1_dtc, p2_otk, p2_dtc, p3_otk, p3_dtc) @@ -195,7 +262,7 @@ class ValidatingTopologyMappingChecksTest validCases.foreach { case (threshold, participants) => val ptp = factory.mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1, None, threshold, @@ -213,7 +280,7 @@ class ValidatingTopologyMappingChecksTest "reject a removal when the participant still hosts a party" in { val (checks, store) = mk() val ptp = factory.mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party1, None, PositiveInt.one, @@ -385,35 +452,6 @@ class ValidatingTopologyMappingChecksTest ) ) } - - "report ThresholdTooHigh" in { - val (checks, store) = mk() - val (Seq(med1, med2), transactions) = generateMemberIdentities(2, MediatorId(_)) - addToStore(store, transactions*) - - // using reflection to create an instance via the private constructor - // so we can bypass the checks in MediatorDomainState.create - val ctr = classOf[MediatorDomainState].getConstructor( - classOf[DomainId], - classOf[NonNegativeInt], - classOf[PositiveInt], - classOf[Object], - classOf[Seq[MediatorId]], - ) - val invalidMapping = ctr.newInstance( - domainId, - NonNegativeInt.zero, - PositiveInt.three, // threshold higher than number of active mediators - NonEmpty(Seq, med1, med2), - Seq.empty, - ) - - val mds = factory.mkAdd(invalidMapping, factory.SigningKeys.key1) - - checkTransaction(checks, 
mds) shouldBe Left( - TopologyTransactionRejection.ThresholdTooHigh(3, 2) - ) - } } "validating SequencerDomainState" should { @@ -491,33 +529,6 @@ class ValidatingTopologyMappingChecksTest ) ) } - - "report ThresholdTooHigh" in { - val (checks, store) = mk() - val (Seq(seq1, seq2), transactions) = generateMemberIdentities(2, SequencerId(_)) - addToStore(store, transactions*) - - // using reflection to create an instance via the private constructor - // so we can bypass the checks in SequencerDomainState.create - val ctr = classOf[SequencerDomainState].getConstructor( - classOf[DomainId], - classOf[PositiveInt], - classOf[Object], - classOf[Seq[SequencerId]], - ) - val invalidMapping = ctr.newInstance( - domainId, - PositiveInt.three, // threshold higher than number of active sequencers - NonEmpty(Seq, seq1, seq2), - Seq.empty, - ) - - val sds = factory.mkAdd(invalidMapping, factory.SigningKeys.key1) - - checkTransaction(checks, sds) shouldBe Left( - TopologyTransactionRejection.ThresholdTooHigh(3, 2) - ) - } } "validating OwnerToKeyMapping" should { @@ -582,7 +593,7 @@ class ValidatingTopologyMappingChecksTest "validating AuthorityOf" should { val ptps @ Seq(p1_ptp, p2_ptp, p3_ptp) = Seq(party1, party2, party3).map { party => factory.mkAdd( - PartyToParticipant( + PartyToParticipant.tryCreate( party, None, PositiveInt.one, @@ -596,7 +607,9 @@ class ValidatingTopologyMappingChecksTest addToStore(store, ptps*) val authorityOf = - factory.mkAdd(AuthorityOf(party1, None, PositiveInt.two, Seq(party2, party3))) + factory.mkAdd( + AuthorityOf.create(party1, None, PositiveInt.two, Seq(party2, party3)).value + ) checkTransaction(checks, authorityOf) shouldBe Right(()) } @@ -605,40 +618,31 @@ class ValidatingTopologyMappingChecksTest addToStore(store, p1_ptp) val missingAuthorizingParty = - factory.mkAdd(AuthorityOf(party2, None, PositiveInt.one, Seq(party1))) + factory.mkAdd(AuthorityOf.create(party2, None, PositiveInt.one, Seq(party1)).value) checkTransaction(checks, missingAuthorizingParty) shouldBe Left( TopologyTransactionRejection.UnknownParties(Seq(party2)) ) val missingAuthorizedParty = - factory.mkAdd(AuthorityOf(party1, None, PositiveInt.one, Seq(party2))) + factory.mkAdd(AuthorityOf.create(party1, None, PositiveInt.one, Seq(party2)).value) checkTransaction(checks, missingAuthorizedParty) shouldBe Left( TopologyTransactionRejection.UnknownParties(Seq(party2)) ) val missingAllParties = - factory.mkAdd(AuthorityOf(party2, None, PositiveInt.one, Seq(party3))) + factory.mkAdd(AuthorityOf.create(party2, None, PositiveInt.one, Seq(party3)).value) checkTransaction(checks, missingAllParties) shouldBe Left( TopologyTransactionRejection.UnknownParties(Seq(party2, party3)) ) val missingMixedParties = - factory.mkAdd(AuthorityOf(party2, None, PositiveInt.one, Seq(party1, party3))) + factory.mkAdd( + AuthorityOf.create(party2, None, PositiveInt.one, Seq(party1, party3)).value + ) checkTransaction(checks, missingMixedParties) shouldBe Left( TopologyTransactionRejection.UnknownParties(Seq(party2, party3)) ) } - - "report ThresholdTooHigh if the threshold is higher than the number of authorized parties" in { - val (checks, store) = mk() - addToStore(store, ptps*) - - val thresholdTooHigh = - factory.mkAdd(AuthorityOf(party1, None, PositiveInt.three, Seq(party2, party3))) - checkTransaction(checks, thresholdTooHigh) shouldBe Left( - TopologyTransactionRejection.ThresholdTooHigh(3, 2) - ) - } } } diff --git a/community/common/src/test/scala/com/digitalasset/canton/tracing/TracedScaffeineTest.scala 
b/community/common/src/test/scala/com/digitalasset/canton/tracing/TracedScaffeineTest.scala index c7669e7b9..67a77e72f 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/tracing/TracedScaffeineTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/tracing/TracedScaffeineTest.scala @@ -32,7 +32,7 @@ class TracedScaffeineTest extends AsyncWordSpec with BaseTest { } }.failOnShutdown - "Handle an AbortDueToShutdownException" in { + "Handle AbortDueToShutdownException in get" in { val keysCache = TracedScaffeine.buildTracedAsyncFutureUS[Int, Int]( cache = CachingConfigs.testing.mySigningKeyCache.buildScaffeine(), @@ -47,6 +47,22 @@ } } + // Note that Scaffeine.getAll returns a failed future that wraps the underlying exception + // with java.util.concurrent.CompletionException + "Handle AbortDueToShutdownException in getAll" in { + val keysCache = + TracedScaffeine.buildTracedAsyncFutureUS[Int, Int]( + cache = CachingConfigs.testing.mySigningKeyCache.buildScaffeine(), + loader = traceContext => input => getValueBroken(input), + )(logger) + + for { + result <- keysCache.getAllUS(Set(10)).unwrap + } yield { + result shouldBe UnlessShutdown.AbortedDueToShutdown + } + } + "Allow entries to be cleared" in { val loads = new AtomicInteger(0) diff --git a/community/common/src/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala b/community/common/src/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala index d01b6c2d0..0b238ed04 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/version/HasProtocolVersionedWrapperTest.scala @@ -122,8 +122,8 @@ class HasProtocolVersionedWrapperTest extends AnyWordSpec with BaseTest { import com.digitalasset.canton.version.HasProtocolVersionedWrapperTest.Message.* // Used by the compiled string below - val stablePV = ProtocolVersion.stable(10) - val unstablePV = ProtocolVersion.unstable(11) + val stablePV = ProtocolVersion.createStable(10) + val unstablePV = ProtocolVersion.createUnstable(11) def name: String = "message" @@ -213,14 +213,14 @@ object HasProtocolVersionedWrapperTest { protocolVersion 30 31 32 33 34 ...
*/ override val supportedProtoVersions = SupportedProtoVersions( - ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.unstable((basePV + 2).v))( + ProtoVersion(1) -> VersionedProtoConverter(ProtocolVersion.createUnstable((basePV + 2).v))( VersionedMessageV1 )( supportedProtoVersionMemoized(_)(fromProtoV1), _.toProtoV1.toByteString, ), // Can use a stable Protobuf message in a stable protocol version - ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.stable(basePV.v))( + ProtoVersion(0) -> VersionedProtoConverter(ProtocolVersion.createStable(basePV.v))( VersionedMessageV0 )( supportedProtoVersionMemoized(_)(fromProtoV0), @@ -228,7 +228,7 @@ object HasProtocolVersionedWrapperTest { ), // Can use an unstable Protobuf message in an unstable protocol version ProtoVersion(2) -> VersionedProtoConverter( - ProtocolVersion.unstable((basePV + 3).v) + ProtocolVersion.createUnstable((basePV + 3).v) )(VersionedMessageV2)( supportedProtoVersionMemoized(_)(fromProtoV2), _.toProtoV2.toByteString, diff --git a/community/common/src/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala b/community/common/src/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala index 790253344..9c669a45e 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/version/ProtocolVersionTest.scala @@ -112,6 +112,5 @@ class ProtocolVersionTest extends AnyWordSpec with BaseTest { result shouldBe a[ParsingResult[?]] result.left.value should have message unsupportedErrorMessage(invalidProtocolVersion) } - } } diff --git a/community/demo/src/main/daml/ai-analysis/daml.yaml b/community/demo/src/main/daml/ai-analysis/daml.yaml index 473415caa..fae2c2a68 100644 --- a/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: ai-analysis diff --git a/community/demo/src/main/daml/bank/daml.yaml b/community/demo/src/main/daml/bank/daml.yaml index 8917867cc..ae6291a97 100644 --- a/community/demo/src/main/daml/bank/daml.yaml +++ b/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: bank diff --git a/community/demo/src/main/daml/doctor/daml.yaml b/community/demo/src/main/daml/doctor/daml.yaml index 341c35727..9e18e73da 100644 --- a/community/demo/src/main/daml/doctor/daml.yaml +++ b/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: doctor diff --git a/community/demo/src/main/daml/health-insurance/daml.yaml b/community/demo/src/main/daml/health-insurance/daml.yaml index dfccbb470..23d9cf038 100644 --- a/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: health-insurance diff --git a/community/demo/src/main/daml/medical-records/daml.yaml b/community/demo/src/main/daml/medical-records/daml.yaml index 21ef0dcd3..16e792c5f 100644 --- 
a/community/demo/src/main/daml/medical-records/daml.yaml +++ b/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: medical-records diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala index 8bd13a858..fc2b1a353 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala @@ -33,7 +33,6 @@ import com.digitalasset.canton.domain.block.update.{ import com.digitalasset.canton.domain.sequencing.integrations.state.statemanager.MemberCounters import com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencer import com.digitalasset.canton.domain.sequencing.sequencer.errors.CreateSubscriptionError -import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerRateLimitManager import com.digitalasset.canton.domain.sequencing.sequencer.{Sequencer, SequencerIntegration} import com.digitalasset.canton.error.BaseAlarm import com.digitalasset.canton.lifecycle.{ @@ -50,7 +49,7 @@ import com.digitalasset.canton.topology.{DomainId, Member, SequencerId} import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.util.{ErrorUtil, LoggerUtil, MapsUtil, MonadUtil} +import com.digitalasset.canton.util.{ErrorUtil, LoggerUtil, MapsUtil} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import org.apache.pekko.stream.KillSwitches @@ -128,7 +127,6 @@ class BlockSequencerStateManager( override val maybeLowerTopologyTimestampBound: Option[CantonTimestamp], override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, - rateLimitManager: SequencerRateLimitManager, unifiedSequencer: Boolean, )(implicit executionContext: ExecutionContext) extends BlockSequencerStateManagerBase @@ -402,12 +400,13 @@ class BlockSequencerStateManager( ) .map { case (_, event) => if (event.isTombstone) { - val err = - s"Encountered tombstone ${event.counter} and ${event.timestamp} for $member" - logger.warn(s"Terminating subscription due to: $err")(event.traceContext) - Left( - SequencerSubscriptionError.TombstoneEncountered.Error(err) + val err = SequencerSubscriptionError.TombstoneEncountered.Error( + event.counter, + member, + event.timestamp, ) + logger.warn(s"Terminating subscription due to: ${err.cause}")(event.traceContext) + Left(err) } else { Right(event) } @@ -446,20 +445,6 @@ class BlockSequencerStateManager( newHead } - private def updateMemberCounterSupportedAfter(member: Member, counter: SequencerCounter)(implicit - traceContext: TraceContext - ): Future[Unit] = - store - .updateMemberCounterSupportedAfter(member, counter) - .map(_ => - countersSupportedAfter.getAndUpdate { previousCounters => - if (previousCounters.get(member).exists(_ >= counter)) - previousCounters - else - previousCounters + (member -> counter) - }.discard - ) - private def handleChunkUpdate( priorHead: HeadState, update: ChunkUpdate[SignedChunkEvents], @@ -553,13 +538,6 @@ class BlockSequencerStateManager( 
membersDisabled = Seq.empty, inFlightAggregationUpdates = update.inFlightAggregationUpdates, ) - _ <- MonadUtil.sequentialTraverse[(Member, SequencerCounter), Future, Unit]( - update.events - .flatMap(_.events) - .collect { - case (member, tombstone) if tombstone.isTombstone => member -> tombstone.counter - } - ) { case (member, counter) => updateMemberCounterSupportedAfter(member, counter) } } yield { // head state update must happen before member counters are updated // as otherwise, if we have a registration in between counter-signalling and head-state, @@ -788,7 +766,6 @@ object BlockSequencerStateManager { enableInvariantCheck: Boolean, timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, - rateLimitManager: SequencerRateLimitManager, unifiedSequencer: Boolean, )(implicit executionContext: ExecutionContext, @@ -809,7 +786,6 @@ object BlockSequencerStateManager { maybeLowerTopologyTimestampBound = maybeLowerTopologyTimestampBound, timeouts = timeouts, loggerFactory = loggerFactory, - rateLimitManager = rateLimitManager, unifiedSequencer = unifiedSequencer, ) } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala index 04147114f..a47fb508d 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala @@ -336,6 +336,7 @@ private[update] final class BlockChunkProcessor( case Some(params) => newMembers.toList .parTraverse_ { case (member, timestamp) => + // Note: in unified sequencer mode, rate limiter uses a default value if member is not present in its state rateLimitManager .registerNewMemberAt( member, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala index 562700550..bdbc353ec 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala @@ -702,30 +702,29 @@ private[update] final class SubmissionRequestValidator( } topologyTimestampO = submissionRequest.topologyTimestamp + members = + groupToMembers.values.flatten.toSet ++ submissionRequest.batch.allMembers + submissionRequest.sender events = if (unifiedSequencer) { Map.empty[Member, Deliver[ClosedEnvelope]] } else { - (groupToMembers.values.flatten.toSet ++ submissionRequest.batch.allMembers + submissionRequest.sender).toSeq.map { - member => - val groups = groupToMembers.collect { - case (groupAddress, members) if members.contains(member) => groupAddress - }.toSet - val deliver = Deliver.create( - state.tryNextCounter(member), - sequencingTimestamp, - domainId, - Option.when(member == submissionRequest.sender)(submissionRequest.messageId), - Batch.filterClosedEnvelopesFor(aggregatedBatch, member, groups), - topologyTimestampO, - protocolVersion, - Option.empty[TrafficReceipt], - ) - member -> deliver + members.toSeq.map { member => + val groups = groupToMembers.collect { + case (groupAddress, members) if members.contains(member) => groupAddress + }.toSet + val deliver = Deliver.create( + state.tryNextCounter(member), + sequencingTimestamp, + 
domainId, + Option.when(member == submissionRequest.sender)(submissionRequest.messageId), + Batch.filterClosedEnvelopesFor(aggregatedBatch, member, groups), + topologyTimestampO, + protocolVersion, + Option.empty[TrafficReceipt], + ) + member -> deliver }.toMap } - members = - groupToMembers.values.flatten.toSet ++ submissionRequest.batch.allMembers + submissionRequest.sender aggregationUpdate = aggregationOutcome.map { case (aggregationId, inFlightAggregationUpdate, _) => aggregationId -> inFlightAggregationUpdate diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala index caa0aad02..b0d3e6310 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala @@ -195,9 +195,7 @@ private[update] class TrafficControlValidator( sender, // When above traffic limit we don't consume traffic, hence cost = 0 Some( - error.trafficState.toTrafficReceipt( - consumedCost = NonNegativeLong.zero - ) + error.trafficState.copy(lastConsumedCost = NonNegativeLong.zero).toTrafficReceipt ), ) // Outdated event costs are possible if the sender is too far behind and out of the tolerance window. diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala index ca3e840eb..8391d0b60 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala @@ -37,6 +37,7 @@ final case class DomainParametersConfig( requiredCryptoKeyFormats: Option[NonEmpty[Set[CryptoKeyFormat]]] = None, // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version override val devVersionSupport: Boolean = true, + override val betaVersionSupport: Boolean = false, override val dontWarnOnDeprecatedPV: Boolean = false, ) extends ProtocolConfig with PrettyPrinting { diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationRequestAndResponseProcessor.scala similarity index 99% rename from community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala rename to community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationRequestAndResponseProcessor.scala index 60f911068..d27afc9cd 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessor.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/ConfirmationRequestAndResponseProcessor.scala @@ -42,7 +42,7 @@ import scala.concurrent.{ExecutionContext, Future} /** Scalable service to validate the received MediatorConfirmationRequests and ConfirmationResponses, * derive a verdict, and send ConfirmationResultMessages to informee participants. 
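 * It is driven by [[MediatorEventsProcessor]], which extracts the mediator events from each batch of sequencer events and hands them to this processor.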
*/ -private[mediator] class ConfirmationResponseProcessor( +private[mediator] class ConfirmationRequestAndResponseProcessor( domainId: DomainId, private val mediatorId: MediatorId, verdictSender: VerdictSender, @@ -211,7 +211,7 @@ private[mediator] class ConfirmationResponseProcessor( rootHashMessages: Seq[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], batchAlsoContainsTopologyTransaction: Boolean, )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { - withSpan("TransactionConfirmationResponseProcessor.processRequest") { + withSpan("ConfirmationRequestAndResponseProcessor.processRequest") { val timeout = requestId.unwrap.plus(confirmationResponseTimeout.unwrap) implicit traceContext => span => @@ -755,7 +755,7 @@ private[mediator] class ConfirmationResponseProcessor( topologyTimestamp: Option[CantonTimestamp], recipients: Recipients, )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - withSpan("TransactionConfirmationResponseProcessor.processResponse") { + withSpan("ConfirmationRequestAndResponseProcessor.processResponse") { implicit traceContext => span => span.setAttribute("timestamp", ts.toString) span.setAttribute("counter", counter.toString) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala index 408047ef8..386d67383 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala @@ -99,7 +99,7 @@ private[mediator] class Mediator( private val verdictSender = VerdictSender(sequencerClient, syncCrypto, mediatorId, protocolVersion, loggerFactory) - private val processor = new ConfirmationResponseProcessor( + private val processor = new ConfirmationRequestAndResponseProcessor( domain, mediatorId, verdictSender, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEvent.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEvent.scala index caf2b0251..c784f635c 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEvent.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEvent.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.sequencing.protocol.{OpenEnvelope, Recipients} /** The [[MediatorEventsProcessor]] looks through all sequencer events provided by the sequencer client in a batch * to pick out events for the Mediator with the same request-id while also scheduling timeouts and running * topology transactions at appropriate times. We map all the mediator events we generate into this simplified - * structure so the [[ConfirmationResponseProcessor]] processes these events without having to perform the same extraction + * structure so the [[ConfirmationRequestAndResponseProcessor]] processes these events without having to perform the same extraction * and error handling of the original SequencerEvent. 
*/ private[mediator] sealed trait MediatorEvent extends PrettyPrinting { diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEventsProcessor.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEventsProcessor.scala index 0afc363b9..db1e9bce6 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEventsProcessor.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorEventsProcessor.scala @@ -166,14 +166,14 @@ private[mediator] class MediatorEventsProcessor( private[mediator] object MediatorEventsProcessor { def apply( identityClientEventHandler: UnsignedProtocolEventHandler, - confirmationResponseProcessor: ConfirmationResponseProcessor, + processor: ConfirmationRequestAndResponseProcessor, mediatorEventDeduplicator: MediatorEventDeduplicator, metrics: MediatorMetrics, loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext): MediatorEventsProcessor = { new MediatorEventsProcessor( identityClientEventHandler, - confirmationResponseProcessor.handleRequestEvents, + processor.handleRequestEvents, mediatorEventDeduplicator, metrics, loggerFactory, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala index 6de61b2d0..bf752118f 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala @@ -91,6 +91,7 @@ abstract class MediatorNodeConfigCommon( final case class MediatorNodeParameterConfig( // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version override val devVersionSupport: Boolean = true, + override val betaVersionSupport: Boolean = false, override val dontWarnOnDeprecatedPV: Boolean = false, override val batching: BatchingConfig = BatchingConfig(), override val caching: CachingConfigs = CachingConfigs(), @@ -491,7 +492,7 @@ class MediatorNodeBootstrap( else // TODO(#15561) Remove NonEmpty construct once stableAndSupported is NonEmpty again NonEmpty - .from(ProtocolVersion.stableAndSupported) + .from(ProtocolVersion.stable) .getOrElse(sys.error("no protocol version is considered stable in this release")), minimumProtocolVersion = Some(ProtocolVersion.minimum), dontWarnOnDeprecatedPV = parameterConfig.dontWarnOnDeprecatedPV, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala index ddbcd521a..ea5de0797 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/VerdictSender.scala @@ -215,7 +215,7 @@ private[mediator] class DefaultVerdictSender( .map(Recipients.recipientGroups) .getOrElse( // Should never happen as the topology (same snapshot) is checked in - // `ConfirmationResponseProcessor.validateRequest` + // `ConfirmationRequestAndResponseProcessor.validateRequest` ErrorUtil.invalidState("No active participants for informees") ) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/config/SequencerNodeParameterConfig.scala 
b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/config/SequencerNodeParameterConfig.scala index d7a9ea9e9..00c1d4ffa 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/config/SequencerNodeParameterConfig.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/config/SequencerNodeParameterConfig.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.config.{ final case class SequencerNodeParameterConfig( // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version override val devVersionSupport: Boolean = true, + override val betaVersionSupport: Boolean = false, override val dontWarnOnDeprecatedPV: Boolean = false, maxConfirmationRequestsBurstFactor: PositiveDouble = PositiveDouble.tryCreate(0.5), override val batching: BatchingConfig = BatchingConfig(), diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala index ac26a5d5b..e3a556440 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencer.scala @@ -22,6 +22,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.traffic.{ SequencerRateLimitError, SequencerTrafficStatus, } +import com.digitalasset.canton.domain.sequencing.traffic.store.TrafficConsumedStore import com.digitalasset.canton.health.admin.data.{SequencerAdminStatus, SequencerHealthStatus} import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle} import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, TracedLogger} @@ -43,9 +44,9 @@ import com.digitalasset.canton.time.{Clock, NonNegativeFiniteDuration} import com.digitalasset.canton.topology.{DomainId, Member, SequencerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.tracing.TraceContext.withNewTraceContext -import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.FutureUtil.doNotAwait import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil} import com.digitalasset.canton.version.ProtocolVersion import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer @@ -101,6 +102,7 @@ object DatabaseSequencer { clock, domainId, topologyClientMember, + trafficConsumedStore = None, protocolVersion, cryptoApi, metrics, @@ -125,6 +127,7 @@ class DatabaseSequencer( clock: Clock, domainId: DomainId, topologyClientMember: Member, + trafficConsumedStore: Option[TrafficConsumedStore], protocolVersion: ProtocolVersion, cryptoApi: DomainSyncCryptoClient, metrics: SequencerMetrics, @@ -164,7 +167,7 @@ class DatabaseSequencer( storageForAdminChanges.isActive ) - private val store = writer.generalStore + private[sequencer] val store = writer.generalStore protected val memberValidator: SequencerMemberValidator = store @@ -230,6 +233,7 @@ class DatabaseSequencer( cryptoApi, eventSignaller, topologyClientMember, + trafficConsumedStore, protocolVersion, timeouts, loggerFactory, @@ -253,9 +257,10 @@ class DatabaseSequencer( } yield isEnabled } - /** Package private to use access method in tests, see `TestDatabaseSequencerWrapper`. 
- */ - override final def registerMemberInternal(member: Member, timestamp: CantonTimestamp)(implicit + override private[sequencing] final def registerMemberInternal( + member: Member, + timestamp: CantonTimestamp, + )(implicit traceContext: TraceContext ): EitherT[Future, RegisterError, Unit] = { EitherT @@ -396,8 +401,25 @@ class DatabaseSequencer( override def snapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext - ): EitherT[Future, String, SequencerSnapshot] = - EitherT.right[String](store.readStateAtTimestamp(timestamp)) + ): EitherT[Future, String, SequencerSnapshot] = { + for { + safeWatermarkO <- EitherT.right(store.safeWatermark) + // we check if watermark is after the requested timestamp to avoid snapshotting the sequencer + // at a timestamp that is not yet safe to read + _ <- { + safeWatermarkO match { + case Some(safeWatermark) => + EitherTUtil.condUnitET[Future]( + timestamp <= safeWatermark, + s"Requested snapshot at $timestamp is after the safe watermark $safeWatermark", + ) + case None => + EitherT.leftT[Future, Unit](s"No safe watermark found for the sequencer") + } + } + snapshot <- EitherT.right[String](store.readStateAtTimestamp(timestamp)) + } yield snapshot + } override private[sequencing] def firstSequencerCounterServeableForSequencer: SequencerCounter = // Database sequencers are never bootstrapped diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/Sequencer.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/Sequencer.scala index 2ce7afc9b..44d0f745e 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/Sequencer.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/Sequencer.scala @@ -88,8 +88,8 @@ trait Sequencer */ def isEnabled(member: Member)(implicit traceContext: TraceContext): Future[Boolean] - def registerMemberInternal(member: Member, timestamp: CantonTimestamp)(implicit - traceContext: TraceContext + private[sequencing] def registerMemberInternal(member: Member, timestamp: CantonTimestamp)( + implicit traceContext: TraceContext ): EitherT[Future, RegisterError, Unit] def sendAsyncSigned(signedSubmission: SignedContent[SubmissionRequest])(implicit diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReader.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReader.scala index 12a5384d9..c8a493cb7 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReader.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReader.scala @@ -15,6 +15,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.sequencing.sequencer.SequencerReader.ReadState import com.digitalasset.canton.domain.sequencing.sequencer.errors.CreateSubscriptionError import com.digitalasset.canton.domain.sequencing.sequencer.store.* +import com.digitalasset.canton.domain.sequencing.traffic.store.TrafficConsumedStore import com.digitalasset.canton.lifecycle.{ CloseContext, FlagCloseable, @@ -29,7 +30,7 @@ import com.digitalasset.canton.sequencing.client.{ SequencerSubscriptionError, } import com.digitalasset.canton.sequencing.protocol.* -import com.digitalasset.canton.sequencing.traffic.TrafficReceipt +import com.digitalasset.canton.sequencing.traffic.{TrafficConsumed, 
TrafficReceipt} import com.digitalasset.canton.sequencing.{GroupAddressResolver, OrdinarySerializedEvent} import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent import com.digitalasset.canton.store.db.DbDeserializationException @@ -78,6 +79,7 @@ class SequencerReader( syncCryptoApi: SyncCryptoClient[SyncCryptoApi], eventSignaller: EventSignaller, topologyClientMember: Member, + trafficConsumedStoreO: Option[TrafficConsumedStore], protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, @@ -355,35 +357,34 @@ class SequencerReader( // To not introduce gaps in the sequencer counters, // we deliver an empty batch to the member if it is not the sender. // This way, we can avoid revalidating the skipped events after the checkpoint we resubscribe from. - val event = if (registeredMember.memberId == sender) { - val error = - SequencerErrors.TopoologyTimestampTooEarly( - topologyTimestamp, + getTrafficReceipt(sender, sequencingTimestamp).map { trafficReceiptO => + val event = if (registeredMember.memberId == sender) { + val error = + SequencerErrors.TopoologyTimestampTooEarly( + topologyTimestamp, + sequencingTimestamp, + ) + DeliverError.create( + counter, sequencingTimestamp, + domainId, + messageId, + error, + protocolVersion, + trafficReceiptO, ) - DeliverError.create( - counter, - sequencingTimestamp, - domainId, - messageId, - error, - protocolVersion, - Option - .empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer - ) - } else - Deliver.create( - counter, - sequencingTimestamp, - domainId, - None, - emptyBatch, - None, - protocolVersion, - Option - .empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer - ) - Future.successful( + } else + Deliver.create( + counter, + sequencingTimestamp, + domainId, + None, + emptyBatch, + None, + protocolVersion, + trafficReceiptO, + ) + // This event cannot change the topology state of the client // and might not reach the topology client even // if it was originally addressed to it. @@ -396,7 +397,7 @@ class SequencerReader( topologyClientTimestampBefore, unvalidatedEvent.traceContext, ) - ) + } } } @@ -530,6 +531,28 @@ class SequencerReader( } yield OrdinarySequencedEvent(signedEvent)(traceContext) } + private def getTrafficReceipt(senderMemberId: SequencerMemberId, timestamp: CantonTimestamp)( + implicit traceContext: TraceContext + ): Future[Option[TrafficReceipt]] = { + if (registeredMember.memberId == senderMemberId) { // traffic receipt is only for the sender + trafficConsumedStoreO match { // and only if we have traffic management enabled + case Some(trafficConsumedStore) => + trafficConsumedStore.lookupAt(member, timestamp).map { + case Some(trafficConsumed) => Some(trafficConsumed.toTrafficReceipt) + case None => + logger.debug( + s"Traffic consumed not found for member $member, receipt will contain init value" + ) + TrafficConsumed.init(member).toTrafficReceipt.some + } + case None => + Future.successful(None) + } + } else { + Future.successful(None) + } + } + /** Takes our stored event and turns it back into a real sequenced event. 
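 * The stored payload bytes are parsed back into a Batch, group addresses are resolved, and the batch is filtered down to the envelopes addressed to the subscribing member.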
*/ private def mkSequencedEvent( @@ -553,7 +576,7 @@ class SequencerReader( _traceContext, ) => // message id only goes to sender - val messageIdO = Option(messageId).filter(_ => registeredMember.memberId == sender) + val messageIdO = Option.when(registeredMember.memberId == sender)(messageId) val batch: Batch[ClosedEnvelope] = Batch .fromByteString(protocolVersion)( payload.content @@ -595,6 +618,7 @@ class SequencerReader( memberGroupRecipients = resolvedGroupAddresses.collect { case (groupRecipient, groupMembers) if groupMembers.contains(member) => groupRecipient }.toSet + trafficReceiptO <- getTrafficReceipt(sender, timestamp) } yield { val filteredBatch = Batch.filterClosedEnvelopesFor(batch, member, memberGroupRecipients) Deliver.create[ClosedEnvelope]( @@ -605,12 +629,12 @@ class SequencerReader( filteredBatch, topologyTimestampO, protocolVersion, - Option.empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer + trafficReceiptO, ) } - case ReceiptStoreEvent(_sender, messageId, topologyTimestampO, _traceContext) => - Future.successful( + case ReceiptStoreEvent(sender, messageId, topologyTimestampO, _traceContext) => + getTrafficReceipt(sender, timestamp).map(trafficReceiptO => Deliver.create[ClosedEnvelope]( counter, timestamp, @@ -619,14 +643,14 @@ class SequencerReader( emptyBatch, topologyTimestampO, protocolVersion, - Option.empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer + trafficReceiptO, ) ) - case DeliverErrorStoreEvent(_, messageId, error, _traceContext) => + case DeliverErrorStoreEvent(sender, messageId, error, _traceContext) => val status = DeliverErrorStoreEvent .fromByteString(error, protocolVersion) .valueOr(err => throw new DbDeserializationException(err.toString)) - Future.successful( + getTrafficReceipt(sender, timestamp).map(trafficReceiptO => DeliverError.create( counter, timestamp, @@ -634,7 +658,7 @@ class SequencerReader( messageId, status, protocolVersion, - Option.empty[TrafficReceipt], // TODO(i19528) wire traffic consumed for DB sequencer + trafficReceiptO, ) ) } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala index fe505f6e7..d994d50cd 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala @@ -109,6 +109,7 @@ class BlockSequencer( clock, domainId, sequencerId, + Some(blockRateLimitManager.trafficConsumedStore), protocolVersion, cryptoApi, SequencerMetrics.noop("TODO"), // TODO(#18406) @@ -464,29 +465,34 @@ class BlockSequencer( .toSequencerSnapshot(protocolVersion, trafficPurchased, trafficConsumed) .tap(snapshot => if (logger.underlying.isDebugEnabled()) { - logger.debug( + logger.trace( s"Snapshot for timestamp $timestamp generated from ephemeral state:\n$blockEphemeralState" ) - logger.debug( - s"Resulting snapshot for timestamp $timestamp:\n$snapshot" - ) } ) }, ) finalSnapshot <- { if (unifiedSequencer) { - super.snapshot(timestamp).map { dbsSnapshot => + super.snapshot(bsSnapshot.lastTs).map { dbsSnapshot => dbsSnapshot.copy( + latestBlockHeight = bsSnapshot.latestBlockHeight, inFlightAggregations = bsSnapshot.inFlightAggregations, additional = bsSnapshot.additional, + trafficPurchased = bsSnapshot.trafficPurchased, + 
trafficConsumed = bsSnapshot.trafficConsumed, )(dbsSnapshot.representativeProtocolVersion) } } else { EitherT.pure[Future, String](bsSnapshot) } } - } yield finalSnapshot + } yield { + logger.trace( + s"Resulting snapshot for timestamp $timestamp:\n$finalSnapshot" + ) + finalSnapshot + } } override def pruningStatus(implicit @@ -700,14 +706,27 @@ class BlockSequencer( override def trafficStatus(requestedMembers: Seq[Member], selector: TimestampSelector)(implicit traceContext: TraceContext ): FutureUnlessShutdown[SequencerTrafficStatus] = { - trafficStatesForMembers( - if (requestedMembers.isEmpty) { - // If requestedMembers get the traffic states of all known member in the head state - stateManager.getHeadState.chunk.ephemeral.status.membersMap.keySet - } else requestedMembers.toSet, - selector, - ) - .map(SequencerTrafficStatus.apply) + for { + members <- + if (requestedMembers.isEmpty) { + // If requestedMembers is not set, get the traffic states of all known members + if (unifiedSequencer) { + FutureUnlessShutdown.outcomeF( + cryptoApi.currentSnapshotApproximation.ipsSnapshot.allMembers() + ) + } else { + FutureUnlessShutdown.pure( + stateManager.getHeadState.chunk.ephemeral.status.membersMap.keySet + ) + } + } else { + FutureUnlessShutdown.pure(requestedMembers.toSet) + } + trafficState <- trafficStatesForMembers( + members, + selector, + ) + } yield SequencerTrafficStatus(trafficState) } override def getTrafficStateAt(member: Member, timestamp: CantonTimestamp)(implicit diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala index 3f231f8ef..6d78fda9c 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala @@ -222,7 +222,6 @@ abstract class BlockSequencerFactory( nodeParameters.enableAdditionalConsistencyChecks, nodeParameters.processingTimeouts, domainLoggerFactory, - rateLimitManager, nodeParameters.useUnifiedSequencer, ) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala index eb1a3889d..0481c20b8 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala @@ -162,10 +162,9 @@ class DbSequencerStore( case object Receipt extends EventTypeDiscriminator('R') case object Deliver extends EventTypeDiscriminator('D') - case object Error extends EventTypeDiscriminator('E') - private val all = Seq[EventTypeDiscriminator](Deliver, Error) + private val all = Seq[EventTypeDiscriminator](Deliver, Error, Receipt) def fromChar(value: Char): Either[String, EventTypeDiscriminator] = all.find(_.value == value).toRight(s"Event type discriminator for value [$value] not found") @@ -841,6 +840,9 @@ class DbSequencerStore( query.as[Option[CantonTimestamp]].headOption.map(_.flatten) } + override def safeWatermark(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] = + storage.query(safeWaterMarkDBIO, "query safe watermark") + override
def readStateAtTimestamp( timestamp: CantonTimestamp )(implicit traceContext: TraceContext): Future[SequencerSnapshot] = { diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala index cab6988ce..e874a8fa3 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/InMemorySequencerStore.scala @@ -132,6 +132,9 @@ class InMemorySequencerStore( ): Future[Option[Watermark]] = Future.successful(watermark.get.map(Watermark(_, online = true))) + override def safeWatermark(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] = + Future.successful(watermark.get) + override def goOffline(instanceIndex: Int)(implicit traceContext: TraceContext): Future[Unit] = Future.unit diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala index a007ff55d..5732588e3 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala @@ -532,6 +532,10 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut traceContext: TraceContext ): Future[Option[Watermark]] + /** Return the minimum watermark across all online sequencers + */ + def safeWatermark(implicit traceContext: TraceContext): Future[Option[CantonTimestamp]] + /** Flag that we're going offline (likely due to a shutdown) */ def goOffline(instanceIndex: Int)(implicit traceContext: TraceContext): Future[Unit] @@ -790,10 +794,14 @@ trait SequencerStore extends SequencerMemberValidator with NamedLogging with Aut if (!memberStatus.enabled) eitherT(disableMember(id)) else EitherT.rightT[Future, String](()) _ <- eitherT(memberStatus.lastAcknowledged.fold(Future.unit)(ack => acknowledge(id, ack))) - _ <- saveCounterCheckpoint( - id, - CounterCheckpoint(snapshot.heads(memberStatus.member), lastTs, Some(lastTs)), - ).leftMap(_.toString) + _ <- + // Some members can be registered, but not have any events yet, so there can be no CounterCheckpoint in the snapshot + snapshot.heads.get(memberStatus.member).fold(eitherT[String](Future.unit)) { counter => + saveCounterCheckpoint( + id, + CounterCheckpoint(counter, lastTs, Some(lastTs)), + ).leftMap(_.toString) + } } yield () } _ <- saveLowerBound(lastTs).leftMap(_.toString) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala index ba9312469..4dcc2cfdf 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManager.scala @@ -587,7 +587,7 @@ class EnterpriseSequencerRateLimitManager( } } yield { // Here we correctly consumed the traffic, so submitted cost and consumed cost 
are the same - trafficConsumed.toTrafficReceipt(consumedCost = cost) + trafficConsumed.toTrafficReceipt } } @@ -630,11 +630,13 @@ class EnterpriseSequencerRateLimitManager( // Update the traffic consumed at sequencing time, and convert it to a receipt. Cost = 0 because we failed to consume traffic ensureTrafficConsumedAtSequencingTime(snapshotAtSequencingTime) .map( - _.map( - _.toTrafficReceipt( - consumedCost = NonNegativeLong.zero + _.map { trafficConsumed => + require( + trafficConsumed.lastConsumedCost.unwrap == 0L, + "Consumed cost should be zero", ) - ) + trafficConsumed.toTrafficReceipt + } ) ) } yield { diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/store/db/DbTrafficConsumedStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/store/db/DbTrafficConsumedStore.scala index 0369e74e4..4e41c1223 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/store/db/DbTrafficConsumedStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/store/db/DbTrafficConsumedStore.scala @@ -33,8 +33,9 @@ class DbTrafficConsumedStore( trafficConsumed: TrafficConsumed )(implicit traceContext: TraceContext): Future[Unit] = { val insertSql = - sqlu"""insert into seq_traffic_control_consumed_journal (member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder) - values (${trafficConsumed.member}, ${trafficConsumed.sequencingTimestamp}, ${trafficConsumed.extraTrafficConsumed}, ${trafficConsumed.baseTrafficRemainder}) on conflict do nothing""" + sqlu"""insert into seq_traffic_control_consumed_journal (member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost) + values (${trafficConsumed.member}, ${trafficConsumed.sequencingTimestamp}, ${trafficConsumed.extraTrafficConsumed}, ${trafficConsumed.baseTrafficRemainder}, ${trafficConsumed.lastConsumedCost}) + on conflict do nothing""" storage.update_(insertSql, functionFullName) } @@ -43,7 +44,10 @@ class DbTrafficConsumedStore( member: Member )(implicit traceContext: TraceContext): Future[Seq[TrafficConsumed]] = { val query = - sql"select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder from seq_traffic_control_consumed_journal where member = $member order by sequencing_timestamp asc" + sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost + from seq_traffic_control_consumed_journal + where member = $member + order by sequencing_timestamp asc""" storage.query(query.as[TrafficConsumed], functionFullName) } @@ -51,17 +55,21 @@ class DbTrafficConsumedStore( member: Member )(implicit traceContext: TraceContext): Future[Option[TrafficConsumed]] = { val query = - sql"select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder from seq_traffic_control_consumed_journal where member = $member order by sequencing_timestamp desc" + sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost + from seq_traffic_control_consumed_journal + where member = $member + order by sequencing_timestamp desc""" storage.querySingle(query.as[TrafficConsumed].headOption, functionFullName).value } override def lookupLatestBeforeInclusive(timestamp: CantonTimestamp)(implicit traceContext: TraceContext ): Future[Seq[TrafficConsumed]] = { + // TODO(#18394): Check if performance of this query is good (looks a lot like 
a group by) val query = - sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder + sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost from - (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, + (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost, rank() over (partition by member order by sequencing_timestamp desc) as pos from seq_traffic_control_consumed_journal where sequencing_timestamp <= $timestamp @@ -75,10 +83,11 @@ class DbTrafficConsumedStore( def lookupLatestBeforeInclusiveForMember(member: Member, timestamp: CantonTimestamp)(implicit traceContext: TraceContext ): Future[Option[TrafficConsumed]] = { + // TODO(#18394): Check if performance of this query is good (looks a lot like a group by) val query = - sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder + sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost from - (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, + (select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost, rank() over (partition by member order by sequencing_timestamp desc) as pos from seq_traffic_control_consumed_journal where sequencing_timestamp <= $timestamp and member = $member @@ -93,7 +102,9 @@ class DbTrafficConsumedStore( traceContext: TraceContext ): Future[Option[TrafficConsumed]] = { val query = - sql"select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder from seq_traffic_control_consumed_journal where member = $member and sequencing_timestamp = $timestamp" + sql"""select member, sequencing_timestamp, extra_traffic_consumed, base_traffic_remainder, last_consumed_cost + from seq_traffic_control_consumed_journal + where member = $member and sequencing_timestamp = $timestamp""" storage.querySingle(query.as[TrafficConsumed].headOption, functionFullName).value } @@ -105,6 +116,7 @@ class DbTrafficConsumedStore( // upToExclusive, we need to keep it. // To do that we first find the latest timestamp for all members before the pruning timestamp. // Then we delete all rows below that timestamp for each member. 
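Both TODO(#18394) markers flag the rank()-over-partition lookups for a performance check. For reference, a hedged sketch of the plain-aggregate ("group by") formulation the comments allude to, against the same journal table; the helper name is hypothetical and the implicit SetParameter/GetResult instances of DbTrafficConsumedStore are assumed to be in scope:

    // Sketch only: the latest consumed row per member at or before `timestamp`,
    // expressed as a self-join on the per-member maximum timestamp instead of
    // a window function.
    def lookupLatestBeforeInclusiveViaGroupBy(timestamp: CantonTimestamp) =
      sql"""select c.member, c.sequencing_timestamp, c.extra_traffic_consumed,
                   c.base_traffic_remainder, c.last_consumed_cost
            from seq_traffic_control_consumed_journal c
            join (select member, max(sequencing_timestamp) as max_ts
                  from seq_traffic_control_consumed_journal
                  where sequencing_timestamp <= $timestamp
                  group by member) latest
              on c.member = latest.member and c.sequencing_timestamp = latest.max_ts"""
        .as[TrafficConsumed]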
+ // TODO(#18394): Check performance of the group by query here val deleteQuery = sqlu"""with last_before_pruning_timestamp(member, sequencing_timestamp) as ( select member, max(sequencing_timestamp) diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationRequestAndResponseProcessorTest.scala similarity index 97% rename from community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala rename to community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationRequestAndResponseProcessorTest.scala index 2546dce65..7780bd3b4 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationResponseProcessorTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/mediator/ConfirmationRequestAndResponseProcessorTest.scala @@ -45,7 +45,7 @@ import scala.jdk.CollectionConverters.* import scala.language.reflectiveCalls @nowarn("msg=match may not be exhaustive") -class ConfirmationResponseProcessorTest +class ConfirmationRequestAndResponseProcessorTest extends AsyncWordSpec with BaseTest with HasTestCloseContext @@ -134,8 +134,8 @@ class ConfirmationResponseProcessorTest SymbolicCrypto.create(testedReleaseProtocolVersion, timeouts, loggerFactory) private lazy val topology: TestingTopology = TestingTopology( - Set(domainId), - Map( + domains = Set(domainId), + topology = Map( submitter -> Map(participant -> ParticipantPermission.Confirmation), signatory -> Map(participant -> ParticipantPermission.Confirmation), @@ -144,8 +144,8 @@ class ConfirmationResponseProcessorTest extra -> Map(participant -> ParticipantPermission.Observation), ), - Set(mediatorGroup, mediatorGroup2), - sequencerGroup, + mediatorGroups = Set(mediatorGroup, mediatorGroup2), + sequencerGroup = sequencerGroup, ) private lazy val identityFactory = TestingIdentityFactory( @@ -158,14 +158,14 @@ class ConfirmationResponseProcessorTest private lazy val identityFactory2 = { val topology2 = TestingTopology( - Set(domainId), - Map( + domains = Set(domainId), + topology = Map( submitter -> Map(participant1 -> ParticipantPermission.Confirmation), signatory -> Map(participant2 -> ParticipantPermission.Confirmation), observer -> Map(participant3 -> ParticipantPermission.Confirmation), ), - Set(mediatorGroup), - sequencerGroup, + mediatorGroups = Set(mediatorGroup), + sequencerGroup = sequencerGroup, ) TestingIdentityFactory( topology2, @@ -190,12 +190,12 @@ class ConfirmationResponseProcessorTest private lazy val identityFactoryOnlySubmitter = TestingIdentityFactory( TestingTopology( - Set(domainId), - Map( + domains = Set(domainId), + topology = Map( submitter -> Map(participant1 -> ParticipantPermission.Confirmation) ), - Set(mediatorGroup0(NonEmpty.mk(Seq, mediatorId))), - sequencerGroup, + mediatorGroups = Set(mediatorGroup0(NonEmpty.mk(Seq, mediatorId))), + sequencerGroup = sequencerGroup, ), loggerFactory, dynamicDomainParameters = initialDomainParameters, @@ -238,7 +238,7 @@ class ConfirmationResponseProcessorTest timeouts, loggerFactory, ) - val processor = new ConfirmationResponseProcessor( + val processor = new ConfirmationRequestAndResponseProcessor( domainId, mediatorId, verdictSender, @@ -290,7 +290,7 @@ class ConfirmationResponseProcessorTest .failOnShutdown .futureValue - "TransactionConfirmationResponseProcessor" should { + 
"ConfirmationRequestAndResponseProcessor" should { def shouldBeViewThresholdBelowMinimumAlarm( requestId: RequestId, viewPosition: ViewPosition, @@ -518,10 +518,24 @@ class ConfirmationResponseProcessorTest ), Recipients.cc(MemberRecipient(participant3), mediatorGroupRecipient), ), + "group addresses and member recipients" -> Seq( + Recipients.recipientGroups( + NonEmpty.mk( + Seq, + NonEmpty.mk( + Set, + ParticipantsOfParty(PartyId.tryFromLfParty(submitter)), + mediatorGroupRecipient, + ), + NonEmpty.mk(Set, MemberRecipient(participant2), mediatorGroupRecipient), + NonEmpty.mk(Set, MemberRecipient(participant3), mediatorGroupRecipient), + ) + ) + ), ) - sequentialTraverse_(tests.zipWithIndex) { case ((_testName, recipients), i) => - withClueF("testname") { + sequentialTraverse_(tests.zipWithIndex) { case ((testName, recipients), i) => + withClueF(testName) { val rootHashMessages = recipients.map(r => OpenEnvelope(rootHashMessage, r)(testedProtocolVersion)) val ts = CantonTimestamp.ofEpochSecond(i.toLong) @@ -645,7 +659,7 @@ class ConfirmationResponseProcessorTest correctRootHashMessage -> Recipients .cc(mediatorGroupRecipient, MemberRecipient(participant)), correctRootHashMessage.copy( - payload = SerializedRootHashMessagePayload(ByteString.copyFromUtf8("other paylroosoad")) + payload = SerializedRootHashMessagePayload(ByteString.copyFromUtf8("other payload")) ) -> Recipients .cc(mediatorGroupRecipient, MemberRecipient(otherParticipant)), ) @@ -681,7 +695,7 @@ class ConfirmationResponseProcessorTest (batchWithSuperfluousRootHashMessage -> show"Superfluous root hash message for members: $otherParticipant") -> List(Set[Member](participant, otherParticipant) -> correctViewType), - (batchWithDifferentPayloads -> show"Different payloads in root hash messages. Sizes: 0, 17.") -> + (batchWithDifferentPayloads -> show"Different payloads in root hash messages. 
Sizes: 0, 13.") -> List(Set[Member](participant, otherParticipant) -> correctViewType), ) // format: on diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala index 9ad9ba8fa..96ad81660 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala @@ -12,6 +12,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.Sequencer as CantonSe import com.digitalasset.canton.protocol.DynamicDomainParameters import com.digitalasset.canton.resource.MemoryStorage import com.digitalasset.canton.sequencing.protocol.{Recipients, SubmissionRequest} +import com.digitalasset.canton.sequencing.traffic.TrafficReceipt import com.digitalasset.canton.topology.{MediatorId, TestingIdentityFactory, TestingTopology} import org.apache.pekko.stream.Materializer @@ -55,6 +56,8 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest { override protected def supportAggregation: Boolean = false + override protected def defaultExpectedTrafficReceipt: Option[TrafficReceipt] = None + "Database snapshotting" should { "allow a new separate database to be created" in { env => @@ -95,7 +98,16 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest { ) checkMessages(List(details), messages) } - snapshot <- valueOrFail(sequencer.snapshot(CantonTimestamp.MaxValue))("get snapshot") + + error <- sequencer + .snapshot(CantonTimestamp.MaxValue) + .leftOrFail("snapshotting after the watermark is expected to fail") + _ <- error should include(" is after the safe watermark") + + // Note: below we use the timestamp that is currently the safe watermark in the sequencer + snapshot <- valueOrFail(sequencer.snapshot(CantonTimestamp.Epoch.immediateSuccessor))( + "get snapshot" + ) // create a second separate sequencer from the snapshot secondSequencer = createSequencerWithSnapshot( @@ -103,8 +115,16 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest { Some(snapshot), ) + // TODO(#18405): Currently crash recovery of DBS resets the watermark to a wrong value (epoch) leading to + // the second snapshot failing due to newly added watermark check. This is a temp workaround to avoid that. 
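The new failure assertion exercises the safe-watermark rule introduced with SequencerStore.safeWatermark earlier in this patch: a snapshot may only be taken at or below the minimum watermark across online sequencer instances. A minimal sketch of that guard, with the helper name assumed and the error text shaped after the assertion above:

    // Sketch: reject snapshot requests beyond the safe watermark, since an
    // online instance could still write events at or before such a timestamp.
    def checkAgainstSafeWatermark(
        requested: CantonTimestamp,
        safeWatermark: Option[CantonTimestamp],
    ): Either[String, Unit] =
      safeWatermark match {
        case Some(safe) if requested > safe =>
          Left(s"Requested snapshot timestamp $requested is after the safe watermark $safe")
        case _ => Right(())
      }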
+ _ <- secondSequencer.store + .saveWatermark(instanceIndex = 0, snapshot.lastTs) + .valueOrFail("save watermark") + // the snapshot from the second sequencer should look the same except that the lastTs will become the lower bound - snapshot2 <- valueOrFail(secondSequencer.snapshot(CantonTimestamp.MaxValue))("get snapshot") + snapshot2 <- valueOrFail( + secondSequencer.snapshot(CantonTimestamp.Epoch.immediateSuccessor) + )("get snapshot") _ = { snapshot2 shouldBe (snapshot.copy(status = snapshot.status.copy(lowerBound = snapshot.lastTs) diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerApiTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerApiTest.scala index d4aeb1af5..99613c979 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerApiTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerApiTest.scala @@ -142,6 +142,8 @@ abstract class SequencerApiTest protected def supportAggregation: Boolean + protected def defaultExpectedTrafficReceipt: Option[TrafficReceipt] + protected def runSequencerApiTests(): Unit = { "The sequencers" should { "send a batch to one recipient" in { env => @@ -160,7 +162,7 @@ abstract class SequencerApiTest SequencerCounter(0), sender, Some(request.messageId), - None, + defaultExpectedTrafficReceipt, EnvelopeDetails(messageContent, recipients), ) checkMessages(List(details), messages) @@ -253,7 +255,7 @@ abstract class SequencerApiTest SequencerCounter.Genesis, sender, Some(request1.messageId), - None, + defaultExpectedTrafficReceipt, EnvelopeDetails(normalMessageContent, recipients), ) checkMessages(List(details), messages) @@ -278,7 +280,7 @@ abstract class SequencerApiTest SequencerCounter.Genesis, member, Option.when(member == sender)(request.messageId), - None, + if (member == sender) defaultExpectedTrafficReceipt else None, EnvelopeDetails(messageContent, recipients.forMember(member, Set.empty).value), ) } @@ -323,12 +325,26 @@ abstract class SequencerApiTest } yield { // p6 gets the receipt immediately checkMessages( - Seq(EventDetails(SequencerCounter.Genesis, p6, Some(request1.messageId), None)), + Seq( + EventDetails( + SequencerCounter.Genesis, + p6, + Some(request1.messageId), + defaultExpectedTrafficReceipt, + ) + ), reads1, ) // p9 gets the receipt only checkMessages( - Seq(EventDetails(SequencerCounter.Genesis, p9, Some(request2.messageId), None)), + Seq( + EventDetails( + SequencerCounter.Genesis, + p9, + Some(request2.messageId), + defaultExpectedTrafficReceipt, + ) + ), reads2, ) // p10 gets the message @@ -337,8 +353,8 @@ abstract class SequencerApiTest EventDetails( SequencerCounter.Genesis, p10, - None, - None, + messageId = None, + trafficReceipt = None, EnvelopeDetails(messageContent, Recipients.cc(p10)), ) ), @@ -429,7 +445,7 @@ abstract class SequencerApiTest } reads3 <- readForMembers(Seq(p6), sequencer) } yield { - checkRejection(reads3, p6, request1.messageId) { + checkRejection(reads3, p6, request1.messageId, defaultExpectedTrafficReceipt) { case SequencerErrors.MaxSequencingTimeTooFar(reason) => reason should ( include(s"Max sequencing time") and @@ -522,7 +538,14 @@ abstract class SequencerApiTest ) } yield { checkMessages( - Seq(EventDetails(SequencerCounter.Genesis, p11, Some(request1.messageId), None)), + Seq( + EventDetails( + SequencerCounter.Genesis, + p11, + Some(request1.messageId), + 
defaultExpectedTrafficReceipt, + ) + ), reads11, ) checkMessages( @@ -531,14 +554,14 @@ abstract class SequencerApiTest SequencerCounter.Genesis, p12, Some(request1.messageId), - None, + defaultExpectedTrafficReceipt, EnvelopeDetails(content2, recipients2, envs1(1).signatures ++ envs2(1).signatures), ), EventDetails( SequencerCounter.Genesis, p13, - None, - None, + messageId = None, + trafficReceipt = None, EnvelopeDetails(content1, recipients1, envs1(0).signatures ++ envs2(0).signatures), EnvelopeDetails(content2, recipients2, envs1(1).signatures ++ envs2(1).signatures), ), @@ -550,15 +573,15 @@ abstract class SequencerApiTest EventDetails( SequencerCounter.Genesis + 1, p11, - None, - None, + messageId = None, + trafficReceipt = None, EnvelopeDetails(content1, recipients1, envs1(0).signatures ++ envs2(0).signatures), ) ), reads12a, ) - checkRejection(reads13, p13, messageId3) { + checkRejection(reads13, p13, messageId3, defaultExpectedTrafficReceipt) { case SequencerErrors.AggregateSubmissionAlreadySent(reason) => reason should ( include(s"The aggregatable request with aggregation ID") and @@ -635,10 +658,17 @@ abstract class SequencerApiTest reads15 <- readForMembers(Seq(p15), sequencer) } yield { checkMessages( - Seq(EventDetails(SequencerCounter.Genesis, p14, Some(request1.messageId), None)), + Seq( + EventDetails( + SequencerCounter.Genesis, + p14, + Some(request1.messageId), + defaultExpectedTrafficReceipt, + ) + ), reads14, ) - checkRejection(reads14a, p14, messageId2) { + checkRejection(reads14a, p14, messageId2, defaultExpectedTrafficReceipt) { case SequencerErrors.AggregateSubmissionStuffing(reason) => reason should include( s"The sender ${p14} previously contributed to the aggregatable submission with ID" @@ -653,7 +683,13 @@ abstract class SequencerApiTest checkMessages( Seq( - EventDetails(SequencerCounter.Genesis + 2, p14, None, None, deliveredEnvelopeDetails) + EventDetails( + SequencerCounter.Genesis + 2, + p14, + messageId = None, + trafficReceipt = None, + deliveredEnvelopeDetails, + ) ), reads14b, ) @@ -663,7 +699,7 @@ abstract class SequencerApiTest SequencerCounter.Genesis, p15, Some(messageId3), - None, + defaultExpectedTrafficReceipt, deliveredEnvelopeDetails, ) ), @@ -725,7 +761,7 @@ abstract class SequencerApiTest _ <- sequencer.sendAsyncSigned(sign(request)).valueOrFailShutdown("Sent async") reads <- readForMembers(Seq(p17), sequencer) } yield { - checkRejection(reads, p17, messageId) { + checkRejection(reads, p17, messageId, defaultExpectedTrafficReceipt) { case SequencerErrors.SubmissionRequestMalformed(reason) => reason should include("Threshold 2 cannot be reached") } @@ -755,7 +791,7 @@ abstract class SequencerApiTest _ <- sequencer.sendAsyncSigned(sign(request)).valueOrFailShutdown("Sent async") reads <- readForMembers(Seq(p18), sequencer) } yield { - checkRejection(reads, p18, messageId) { + checkRejection(reads, p18, messageId, defaultExpectedTrafficReceipt) { case SequencerErrors.SubmissionRequestMalformed(reason) => reason should include("Sender is not eligible according to the aggregation rule") } @@ -921,7 +957,7 @@ trait SequencerApiTestUtils got: Seq[(Member, OrdinarySerializedEvent)], sender: Member, expectedMessageId: MessageId, - expectedTrafficReceipt: Option[TrafficReceipt] = None, + expectedTrafficReceipt: Option[TrafficReceipt], )(assertReason: PartialFunction[Status, Assertion]): Assertion = { got match { case Seq((`sender`, event)) => diff --git 
a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala index bc6d88bbe..0677c5ed7 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerReaderTest.scala @@ -133,6 +133,7 @@ class SequencerReaderTest extends FixtureAsyncWordSpec with BaseTest { cryptoD, eventSignaller, topologyClientMember, + trafficConsumedStoreO = None, testedProtocolVersion, timeouts, loggerFactory, diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala index 2eb16870b..d3952284f 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStoreTest.scala @@ -48,6 +48,7 @@ trait SequencerStoreTest val ts1 = ts(1) val ts2 = ts(2) val ts3 = ts(3) + val ts4 = ts(4) val payloadBytes1 = ByteString.copyFromUtf8("1") val payloadBytes2 = ByteString.copyFromUtf8("1") @@ -57,6 +58,7 @@ trait SequencerStoreTest val messageId1 = MessageId.tryCreate("1") val messageId2 = MessageId.tryCreate("2") val messageId3 = MessageId.tryCreate("3") + val messageId4 = MessageId.tryCreate("4") val instanceDiscriminator1 = UUID.randomUUID() val instanceDiscriminator2 = UUID.randomUUID() @@ -101,6 +103,24 @@ trait SequencerStoreTest ), ) + def deliverReceipt( + ts: CantonTimestamp, + sender: Member, + messageId: MessageId, + topologyTimestamp: CantonTimestamp, + ): Future[Sequenced[PayloadId]] = + for { + senderId <- store.registerMember(sender, ts) + } yield Sequenced( + ts, + ReceiptStoreEvent( + senderId, + messageId, + topologyTimestampO = Some(topologyTimestamp), + traceContext, + ), + ) + def lookupRegisteredMember(member: Member): Future[SequencerMemberId] = for { registeredMemberO <- store.lookupMember(member) @@ -151,6 +171,35 @@ trait SequencerStoreTest } } + def assertReceiptEvent( + event: Sequenced[Payload], + expectedTimestamp: CantonTimestamp, + expectedSender: Member, + expectedMessageId: MessageId, + expectedTopologyTimestamp: Option[CantonTimestamp], + ): Future[Assertion] = { + for { + senderId <- lookupRegisteredMember(expectedSender) + } yield { + event.timestamp shouldBe expectedTimestamp + event.event match { + case ReceiptStoreEvent( + sender, + messageId, + topologyTimestampO, + _traceContext, + ) => + sender shouldBe senderId + messageId shouldBe expectedMessageId + event.event.members shouldBe Set(senderId) + event.event.payloadO shouldBe None + topologyTimestampO shouldBe expectedTopologyTimestamp + case other => + fail(s"Expected deliver receipt but got $other") + } + } + } + /** Save payloads using the default `instanceDiscriminator1` and expecting it to succeed */ def savePayloads(payloads: NonEmpty[Seq[Payload]]): Future[Unit] = valueOrFail(store.savePayloads(payloads, instanceDiscriminator1))("savePayloads") @@ -272,15 +321,16 @@ trait SequencerStoreTest payload2.id, recipients = Set(alice, bob), ) + receiptAlice <- env.deliverReceipt(ts4, alice, messageId4, ts3) deliverEventBob <- env.deliverEvent(ts3, bob, messageId3, 
payload3.id) _ <- env.store.saveEvents( instanceIndex, - NonEmpty(Seq, deliverEventAlice, deliverEventAll, deliverEventBob), + NonEmpty(Seq, deliverEventAlice, deliverEventAll, deliverEventBob, receiptAlice), ) - _ <- env.saveWatermark(deliverEventBob.timestamp).valueOrFail("saveWatermark") + _ <- env.saveWatermark(receiptAlice.timestamp).valueOrFail("saveWatermark") aliceEvents <- env.readEvents(alice) bobEvents <- env.readEvents(bob) - _ = aliceEvents should have size (2) + _ = aliceEvents should have size (3) _ = bobEvents should have size (2) _ <- env.assertDeliverEvent(aliceEvents(0), ts1, alice, messageId1, Set(alice), payload1) _ <- env.assertDeliverEvent( @@ -291,6 +341,13 @@ trait SequencerStoreTest Set(alice, bob), payload2, ) + _ <- env.assertReceiptEvent( + aliceEvents(2), + ts4, + alice, + messageId4, + ts3.some, + ) _ <- env.assertDeliverEvent( bobEvents(0), ts2, diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala index 056fb482f..ec40a11e1 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerIntegrationTest.scala @@ -273,6 +273,7 @@ final case class Env(loggerFactory: NamedLoggerFactory)(implicit loggerFactory, ProtocolVersionCompatibility.supportedProtocolsParticipant( includeUnstableVersions = BaseTest.testedProtocolVersion.isUnstable, + includeBetaVersions = BaseTest.testedProtocolVersion.isBeta, release = ReleaseVersion.current, ), Some(BaseTest.testedProtocolVersion), diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManagerTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManagerTest.scala index db6123696..8d3d91505 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManagerTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/EnterpriseSequencerRateLimitManagerTest.scala @@ -133,11 +133,12 @@ class EnterpriseSequencerRateLimitManagerTest expectedExtraTrafficPurchased: NonNegativeLong = trafficPurchased, expectedTrafficConsumed: NonNegativeLong = expectedExtraTrafficConsumed, expectedBaseTrafficRemainder: NonNegativeLong = NonNegativeLong.zero, + expectedLastConsumedCost: NonNegativeLong = eventCostNonNegative, expectedSerial: Option[PositiveInt] = serial, timestamp: CantonTimestamp = sequencingTs, )(implicit f: Env) = for { states <- f.rlm - .getStates(Set(sender), Some(sequencingTs), None, warnIfApproximate = false) + .getStates(Set(sender), Some(timestamp), None, warnIfApproximate = false) .failOnShutdown } yield states.get(sender) shouldBe Some( Right( @@ -145,6 +146,7 @@ class EnterpriseSequencerRateLimitManagerTest expectedExtraTrafficPurchased, expectedTrafficConsumed, expectedBaseTrafficRemainder, + expectedLastConsumedCost, timestamp, expectedSerial, ) @@ -166,6 +168,7 @@ class EnterpriseSequencerRateLimitManagerTest expectedExtraTrafficPurchased, expectedTrafficConsumed, expectedBaseTrafficRemainder, + NonNegativeLong.zero, sequencingTs, expectedSerial, ) @@ -295,6 +298,7 @@ class EnterpriseSequencerRateLimitManagerTest 
NonNegativeLong.zero, NonNegativeLong.zero, maxBaseTrafficRemainder, + NonNegativeLong.zero, sequencerTs, None, ), @@ -478,6 +482,46 @@ class EnterpriseSequencerRateLimitManagerTest } } + "consumed cost resets to 0 when advancing the timestamp with no traffic being used" in { + implicit f => + returnCorrectCost + + val expected = Right( + Some( + TrafficReceipt( + consumedCost = NonNegativeLong.one, + extraTrafficConsumed = NonNegativeLong.zero, + baseTrafficRemainder = maxBaseTrafficRemainder.tryAdd(-1L), + ) + ) + ) + + for { + _ <- purchaseTraffic + res <- consume( // only uses the base traffic + cost = Some(NonNegativeLong.one), + correctCost = NonNegativeLong.one, + sequencingTimestamp = sequencingTs.plusMillis(1), + ) + _ <- assertTrafficConsumed( + timestamp = sequencingTs.plusMillis(1), + expectedTrafficConsumed = NonNegativeLong.zero, + expectedBaseTrafficRemainder = + maxBaseTrafficRemainder.tryAdd(-1L), // only uses the base traffic + expectedLastConsumedCost = NonNegativeLong.one, + ) + _ <- assertTrafficConsumed( + timestamp = sequencingTs.plusSeconds(1), // after a full second + expectedTrafficConsumed = NonNegativeLong.zero, + expectedBaseTrafficRemainder = + maxBaseTrafficRemainder, // base traffic is back to maximum + expectedLastConsumedCost = NonNegativeLong.zero, // last consumed cost is reset to 0 + ) + } yield { + res shouldBe expected + } + } + "advance traffic consumed timestamp even when not consuming because not enough traffic" in { implicit f => returnCorrectCost @@ -490,6 +534,7 @@ class EnterpriseSequencerRateLimitManagerTest NonNegativeLong.zero, NonNegativeLong.zero, maxBaseTrafficRemainder, + NonNegativeLong.zero, sequencingTs, None, ), @@ -512,6 +557,7 @@ class EnterpriseSequencerRateLimitManagerTest _ <- assertTrafficConsumed( expectedTrafficConsumed = NonNegativeLong.zero, expectedBaseTrafficRemainder = NonNegativeLong.tryCreate(4), + expectedLastConsumedCost = NonNegativeLong.one, ) // then at sequencingTs.plusMillis(1) res2 <- consume( @@ -522,6 +568,7 @@ class EnterpriseSequencerRateLimitManagerTest _ <- assertTrafficConsumed( expectedTrafficConsumed = NonNegativeLong.zero, expectedBaseTrafficRemainder = NonNegativeLong.tryCreate(3), + expectedLastConsumedCost = NonNegativeLong.one, timestamp = sequencingTs.plusMillis(1), ) // then repeat consume at sequencingTs, which simulates a crash recovery that replays the event @@ -530,6 +577,7 @@ class EnterpriseSequencerRateLimitManagerTest _ <- assertTrafficConsumed( expectedTrafficConsumed = NonNegativeLong.zero, expectedBaseTrafficRemainder = NonNegativeLong.tryCreate(3), + expectedLastConsumedCost = NonNegativeLong.one, timestamp = sequencingTs.plusMillis(1), ) } yield { @@ -562,7 +610,10 @@ class EnterpriseSequencerRateLimitManagerTest for { _ <- purchaseTraffic res <- consume(cost = Some(incorrectSubmissionCostNN)) - _ <- assertTrafficConsumed(expectedTrafficConsumed = NonNegativeLong.one) + _ <- assertTrafficConsumed( + expectedTrafficConsumed = NonNegativeLong.one, + expectedLastConsumedCost = incorrectSubmissionCostNN, + ) } yield { res shouldBe Right( Some( diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/store/TrafficConsumedStoreTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/store/TrafficConsumedStoreTest.scala index da5574bc2..aeac5f358 100644 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/store/TrafficConsumedStoreTest.scala +++ 
b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/traffic/store/TrafficConsumedStoreTest.scala @@ -34,6 +34,7 @@ trait TrafficConsumedStoreTest t1, NonNegativeLong.tryCreate(3), NonNegativeLong.tryCreate(20L), + NonNegativeLong.tryCreate(5L), ) val consumedAlice2 = consumedAlice1.copy(sequencingTimestamp = t2) val consumedAlice3 = consumedAlice1.copy(sequencingTimestamp = t3) @@ -43,6 +44,7 @@ trait TrafficConsumedStoreTest t1, NonNegativeLong.tryCreate(3), NonNegativeLong.tryCreate(20L), + NonNegativeLong.tryCreate(10L), ) val consumedBob2 = consumedBob1.copy(sequencingTimestamp = t2) val consumedBob3 = consumedBob1.copy(sequencingTimestamp = t3) @@ -168,21 +170,35 @@ trait TrafficConsumedStoreTest val store = mk() val aliceConsumed = Seq( - TrafficConsumed(alice.member, t1, NonNegativeLong.one, NonNegativeLong.tryCreate(5L)), + TrafficConsumed( + alice.member, + t1, + NonNegativeLong.one, + NonNegativeLong.tryCreate(5L), + NonNegativeLong.tryCreate(5L), + ), TrafficConsumed( alice.member, t3, NonNegativeLong.tryCreate(2), NonNegativeLong.tryCreate(55L), + NonNegativeLong.tryCreate(4L), ), ) val bobConsumed = Seq( - TrafficConsumed(bob.member, t2, NonNegativeLong.one, NonNegativeLong.tryCreate(10L)), + TrafficConsumed( + bob.member, + t2, + NonNegativeLong.one, + NonNegativeLong.tryCreate(10L), + NonNegativeLong.tryCreate(5L), + ), TrafficConsumed( bob.member, t4, NonNegativeLong.tryCreate(2), NonNegativeLong.tryCreate(100L), + NonNegativeLong.tryCreate(3L), ), ) diff --git a/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/IntegrationTestUtilities.scala b/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/IntegrationTestUtilities.scala index 73dc5f5a8..5059dd6d3 100644 --- a/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/IntegrationTestUtilities.scala +++ b/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/IntegrationTestUtilities.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.{DomainAlias, LfTimestamp} import org.scalatest.exceptions.TestFailedException -import scala.annotation.tailrec +import scala.annotation.{nowarn, tailrec} import scala.concurrent.duration.{Duration, FiniteDuration} object IntegrationTestUtilities { @@ -37,6 +37,7 @@ object IntegrationTestUtilities { def maxCount: Int = pcsCount max acceptedTransactionCount } + @nowarn("msg=usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer") def grabCountsRemote( domain: DomainAlias, pr: SyncStateInspection, diff --git a/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/command_inspection_service.proto b/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/command_inspection_service.proto new file mode 100644 index 000000000..08e5e56de --- /dev/null +++ b/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/admin/command_inspection_service.proto @@ -0,0 +1,73 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.daml.ledger.api.v2.admin; + +import "com/daml/ledger/api/v2/commands.proto"; +import "com/daml/ledger/api/v2/completion.proto"; +import "com/daml/ledger/api/v2/value.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Com.Daml.Ledger.Api.V2.Admin"; +option java_outer_classname = "CommandInspectionServiceOuterClass"; +option java_package = "com.daml.ledger.api.v2.admin"; + +// Status: experimental interface, will change before it is deemed production +// ready +// +// The inspection service provides methods for the ledger administrator +// to look under the hood of a running system. +// In V2 Ledger API this service is not available. +service CommandInspectionService { + // Inquire about the status of a command. + // This service is used for debugging only. The command status is only tracked in memory and is not persisted. + // The service can be used to understand the failure status and the structure of a command. + // Requires admin privileges + // The service is alpha without backward compatibility guarantees. + rpc GetCommandStatus(GetCommandStatusRequest) returns (GetCommandStatusResponse); +} + +enum CommandState { + COMMAND_STATE_UNSPECIFIED = 0; // This value acts as wildcard in the queries + COMMAND_STATE_PENDING = 1; + COMMAND_STATE_SUCCEEDED = 2; + COMMAND_STATE_FAILED = 3; +} + +message GetCommandStatusRequest { + string command_id_prefix = 1; // optional filter by command id + CommandState state = 2; // optional filter by state + uint32 limit = 3; // optional limit of returned statuses, defaults to 100 +} + +message GetCommandStatusResponse { + message CommandStatus { + google.protobuf.Timestamp started = 1; + google.protobuf.Timestamp completed = 2; + Completion completion = 3; + CommandState state = 4; + repeated Command commands = 5; + message RequestStatistics { + uint32 envelopes = 1; + uint32 request_size = 2; + uint32 recipients = 3; + } + RequestStatistics request_statistics = 6; + message CommandUpdates { + message Contract { + Identifier template_id = 1; + string contract_id = 2; + Value contract_key = 3; + } + repeated Contract created = 1; + repeated Contract archived = 2; + uint32 exercised = 3; + uint32 fetched = 4; + uint32 looked_up_by_key = 5; + } + CommandUpdates updates = 7; + } + repeated CommandStatus command_status = 1; +} diff --git a/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/experimental_features.proto b/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/experimental_features.proto index 3453b6d7f..5bb73b779 100644 --- a/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/experimental_features.proto +++ b/community/ledger-api/src/main/protobuf/com/daml/ledger/api/v2/experimental_features.proto @@ -17,9 +17,15 @@ option java_package = "com.daml.ledger.api.v2"; // See the feature message definitions for descriptions. message ExperimentalFeatures { ExperimentalStaticTime static_time = 1; + ExperimentalCommandInspectionService command_inspection_service = 2; } // Ledger is in the static time mode and exposes a time service. 
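The authorization wrapper below binds the new service with admin-claim checks; a hedged sketch of a client call through the scalapb-generated stub, where the already-authorized admin gRPC channel `channel` is an assumption:

    import com.daml.ledger.api.v2.admin.command_inspection_service.{
      CommandInspectionServiceGrpc,
      CommandState,
      GetCommandStatusRequest,
    }

    // Sketch: fetch up to 10 in-flight commands whose command id starts with "tx-".
    val inspection = CommandInspectionServiceGrpc.stub(channel)
    val statusF = inspection.getCommandStatus(
      GetCommandStatusRequest(
        commandIdPrefix = "tx-",
        state = CommandState.COMMAND_STATE_PENDING,
        limit = 10,
      )
    )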
message ExperimentalStaticTime { bool supported = 1; } + +// Whether the Ledger API supports command inspection service +message ExperimentalCommandInspectionService { + bool supported = 1; +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandInspectionServiceAuthorization.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandInspectionServiceAuthorization.scala new file mode 100644 index 000000000..2a5ac4172 --- /dev/null +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandInspectionServiceAuthorization.scala @@ -0,0 +1,36 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.auth.services + +import com.daml.ledger.api.v2.admin.command_inspection_service.CommandInspectionServiceGrpc.CommandInspectionService +import com.daml.ledger.api.v2.admin.command_inspection_service.{ + CommandInspectionServiceGrpc, + GetCommandStatusRequest, + GetCommandStatusResponse, +} +import com.digitalasset.canton.ledger.api.ProxyCloseable +import com.digitalasset.canton.ledger.api.auth.Authorizer +import com.digitalasset.canton.ledger.api.grpc.GrpcApiService +import io.grpc.ServerServiceDefinition + +import scala.concurrent.{ExecutionContext, Future} + +final class CommandInspectionServiceAuthorization( + protected val service: CommandInspectionService with AutoCloseable, + private val authorizer: Authorizer, +)(implicit executionContext: ExecutionContext) + extends CommandInspectionService + with ProxyCloseable + with GrpcApiService { + + override def bindService(): ServerServiceDefinition = + CommandInspectionServiceGrpc.bindService(this, executionContext) + + override def close(): Unit = service.close() + + override def getCommandStatus( + request: GetCommandStatusRequest + ): Future[GetCommandStatusResponse] = + authorizer.requireAdminClaims(service.getCommandStatus)(request) +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala index 231ce1b87..2a27d0d68 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandServiceAuthorization.scala @@ -27,7 +27,7 @@ final class CommandServiceAuthorization( with GrpcApiService { override def submitAndWait(request: SubmitAndWaitRequest): Future[Empty] = { - val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands) + val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands) authorizer.requireActAndReadClaimsForParties( actAs = effectiveSubmitters.actAs, readAs = effectiveSubmitters.readAs, @@ -39,7 +39,7 @@ final class CommandServiceAuthorization( override def submitAndWaitForTransaction( request: SubmitAndWaitRequest ): Future[SubmitAndWaitForTransactionResponse] = { - val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands) + val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands) authorizer.requireActAndReadClaimsForParties( actAs = 
effectiveSubmitters.actAs, readAs = effectiveSubmitters.readAs, @@ -51,7 +51,7 @@ final class CommandServiceAuthorization( override def submitAndWaitForUpdateId( request: SubmitAndWaitRequest ): Future[SubmitAndWaitForUpdateIdResponse] = { - val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands) + val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands) authorizer.requireActAndReadClaimsForParties( actAs = effectiveSubmitters.actAs, readAs = effectiveSubmitters.readAs, @@ -63,7 +63,7 @@ final class CommandServiceAuthorization( override def submitAndWaitForTransactionTree( request: SubmitAndWaitRequest ): Future[SubmitAndWaitForTransactionTreeResponse] = { - val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands) + val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands) authorizer.requireActAndReadClaimsForParties( actAs = effectiveSubmitters.actAs, readAs = effectiveSubmitters.readAs, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandSubmissionServiceAuthorization.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandSubmissionServiceAuthorization.scala index 3be204424..f0a800adf 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandSubmissionServiceAuthorization.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/services/CommandSubmissionServiceAuthorization.scala @@ -23,7 +23,7 @@ final class CommandSubmissionServiceAuthorization( with GrpcApiService { override def submit(request: SubmitRequest): Future[SubmitResponse] = { - val effectiveSubmitters = CommandsValidator.effectiveSubmittersV2(request.commands) + val effectiveSubmitters = CommandsValidator.effectiveSubmitters(request.commands) authorizer.requireActAndReadClaimsForParties( actAs = effectiveSubmitters.actAs, readAs = effectiveSubmitters.readAs, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandInspectionService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandInspectionService.scala new file mode 100644 index 000000000..582911ba3 --- /dev/null +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/services/CommandInspectionService.scala @@ -0,0 +1,18 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.services + +import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState +import com.digitalasset.canton.platform.apiserver.execution.CommandStatus + +import scala.concurrent.Future + +trait CommandInspectionService { + def findCommandStatus( + commandIdPrefix: String, + state: CommandState, + limit: Int, + ): Future[Seq[CommandStatus]] + +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandInspectionServiceRequestValidator.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandInspectionServiceRequestValidator.scala new file mode 100644 index 000000000..ae7c4b27a --- /dev/null +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandInspectionServiceRequestValidator.scala @@ -0,0 +1,26 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.ledger.api.validation + +import com.daml.error.ContextualizedErrorLogger +import com.daml.ledger.api.v2.admin.command_inspection_service.GetCommandStatusRequest +import com.daml.lf.data.Ref +import com.digitalasset.canton.ledger.api.validation.ValidationErrors.invalidField +import io.grpc.StatusRuntimeException + +object CommandInspectionServiceRequestValidator { + def validateCommandStatusRequest( + request: GetCommandStatusRequest + )(implicit + contextualizedErrorLogger: ContextualizedErrorLogger + ): Either[StatusRuntimeException, GetCommandStatusRequest] = + if (request.commandIdPrefix.isEmpty) Right(request) + else + Ref.CommandId + .fromString(request.commandIdPrefix) + .map(_ => request) + .left + .map(invalidField("command_id_prefix", _)) + +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala index 6317971af..e03be54bf 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CommandsValidator.scala @@ -271,10 +271,6 @@ object CommandsValidator { commands.fold(noSubmitters)(effectiveSubmitters) } - def effectiveSubmittersV2(commands: Option[Commands]): Submitters[String] = { - commands.fold(noSubmitters)(effectiveSubmitters) - } - def effectiveSubmitters(commands: Commands): Submitters[String] = { val actAs = commands.actAs.toSet val readAs = commands.readAs.toSet -- actAs diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/CompletionInfo.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/CompletionInfo.scala index 5f337b613..48ced7d9d 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/CompletionInfo.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/CompletionInfo.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.ledger.participant.state import com.daml.lf.data.Ref -import com.daml.lf.transaction.TransactionNodeStatistics import 
com.daml.logging.entries.{LoggingValue, ToLoggingValue} import com.digitalasset.canton.data.DeduplicationPeriod import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -37,8 +36,6 @@ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} * * Optional as entries created by the participant.state.v1 API do not have this filled. * Only set for participant.state.v2 created entries - * - * @param statistics The statistics that will be used by participant metering. */ final case class CompletionInfo( actAs: List[Ref.Party], @@ -46,7 +43,6 @@ final case class CompletionInfo( commandId: Ref.CommandId, optDeduplicationPeriod: Option[DeduplicationPeriod], submissionId: Option[Ref.SubmissionId], - statistics: Option[TransactionNodeStatistics], ) extends PrettyPrinting { def changeId: ChangeId = ChangeId(applicationId, commandId, actAs.toSet) @@ -62,7 +58,7 @@ final case class CompletionInfo( object CompletionInfo { implicit val `CompletionInfo to LoggingValue`: ToLoggingValue[CompletionInfo] = { - case CompletionInfo(actAs, applicationId, commandId, deduplicationPeriod, submissionId, _) => + case CompletionInfo(actAs, applicationId, commandId, deduplicationPeriod, submissionId) => LoggingValue.Nested.fromEntries( "actAs " -> actAs, "applicationId " -> applicationId, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/ReadService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/ReadService.scala index 5e7469194..b918bf089 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/ReadService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/ReadService.scala @@ -3,23 +3,12 @@ package com.digitalasset.canton.ledger.participant.state -import com.daml.daml_lf_dev.DamlLf.Archive -import com.daml.error.ContextualizedErrorLogger -import com.daml.lf.data.Ref.PackageId import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.health.ReportsHealth -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.protocol.PackageDescription -import com.digitalasset.canton.topology.DomainId -import com.digitalasset.canton.topology.transaction.ParticipantPermission import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.{DomainAlias, LfPartyId} -import com.google.protobuf.ByteString import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import scala.concurrent.Future - /** An interface for reading the state of a ledger participant. * '''Please note that this interface is unstable and may significantly change.''' * @@ -33,7 +22,7 @@ import scala.concurrent.Future * information. See [[Update]] for a description of the state updates * communicated by [[ReadService!.stateUpdates]]. 
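The deletions below narrow the read side to health reporting plus the update stream; the connected-domain, package, and reassignment queries move to WriteService in the next file. A sketch of the trait's resulting shape (abridged, without the scaladoc):

    trait ReadService extends ReportsHealth {
      def stateUpdates(beginAfter: Option[Offset])(implicit
          traceContext: TraceContext
      ): Source[(Offset, Traced[Update]), NotUsed]
    }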
*/ -trait ReadService extends ReportsHealth with InternalStateServiceProvider { +trait ReadService extends ReportsHealth { /** Get the stream of state [[Update]]s starting from the beginning or right * after the given [[com.digitalasset.canton.data.Offset]] @@ -139,65 +128,4 @@ trait ReadService extends ReportsHealth with InternalStateServiceProvider { def stateUpdates( beginAfter: Option[Offset] )(implicit traceContext: TraceContext): Source[(Offset, Traced[Update]), NotUsed] - - def getConnectedDomains(request: ReadService.ConnectedDomainRequest)(implicit - traceContext: TraceContext - ): Future[ReadService.ConnectedDomainResponse] = - throw new UnsupportedOperationException() - - /** Get the offsets of the incomplete assigned/unassigned events for a set of stakeholders. - * - * @param validAt The offset of validity in participant offset terms. - * @param stakeholders Only offsets are returned which have at least one stakeholder from this set. - * @return All the offset of assigned/unassigned events which do not have their conterparts visible at - * the validAt offset, and only for the reassignments for which this participant is reassigning. - */ - def incompleteReassignmentOffsets( - validAt: Offset, - stakeholders: Set[LfPartyId], - )(implicit traceContext: TraceContext): Future[Vector[Offset]] = { - val _ = validAt - val _ = stakeholders - val _ = traceContext - Future.successful(Vector.empty) - } - - def getPackageMetadataSnapshot(implicit - contextualizedErrorLogger: ContextualizedErrorLogger - ): PackageMetadata = - throw new UnsupportedOperationException() - - def listLfPackages()(implicit - traceContext: TraceContext - ): Future[Seq[PackageDescription]] = - throw new UnsupportedOperationException() - - def getLfArchive(packageId: PackageId)(implicit - traceContext: TraceContext - ): Future[Option[Archive]] = - throw new UnsupportedOperationException() - - def validateDar( - dar: ByteString, - darName: String, - )(implicit - traceContext: TraceContext - ): Future[SubmissionResult] = - throw new UnsupportedOperationException() -} - -object ReadService { - final case class ConnectedDomainRequest(party: LfPartyId) - - final case class ConnectedDomainResponse( - connectedDomains: Seq[ConnectedDomainResponse.ConnectedDomain] - ) - - object ConnectedDomainResponse { - final case class ConnectedDomain( - domainAlias: DomainAlias, - domainId: DomainId, - permission: ParticipantPermission, - ) - } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala index ad777f7e6..41a5b3642 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/SubmitterInfo.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.ledger.participant.state import com.daml.lf.data.Ref -import com.daml.lf.transaction.TransactionNodeStatistics import com.daml.logging.entries.{LoggingValue, ToLoggingValue} import com.digitalasset.canton.data.DeduplicationPeriod @@ -39,14 +38,13 @@ final case class SubmitterInfo( /** The ID for the ledger change */ val changeId: ChangeId = ChangeId(applicationId, commandId, actAs.toSet) - def toCompletionInfo(statistics: Option[TransactionNodeStatistics] = None): CompletionInfo = + def toCompletionInfo: CompletionInfo = 
CompletionInfo( actAs, applicationId, commandId, Some(deduplicationPeriod), submissionId, ) } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/WriteService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/WriteService.scala index ba1441b8f..5888f1cb2 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/WriteService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/WriteService.scala @@ -3,15 +3,28 @@ package com.digitalasset.canton.ledger.participant.state +import com.daml.daml_lf_dev.DamlLf.Archive +import com.daml.error.ContextualizedErrorLogger +import com.daml.lf.data.Ref.PackageId import com.daml.lf.data.{ImmArray, Ref} import com.daml.lf.transaction.{GlobalKey, SubmittedTransaction} import com.daml.lf.value.Value -import com.digitalasset.canton.data.ProcessedDisclosedContract +import com.digitalasset.canton.data.{Offset, ProcessedDisclosedContract} import com.digitalasset.canton.ledger.api.health.ReportsHealth +import com.digitalasset.canton.ledger.participant.state.WriteService.{ + ConnectedDomainRequest, + ConnectedDomainResponse, +} +import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.protocol.PackageDescription import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.topology.transaction.ParticipantPermission import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.{DomainAlias, LfPartyId} +import com.google.protobuf.ByteString import java.util.concurrent.CompletionStage +import scala.concurrent.Future /** An interface to change a ledger via a participant. * '''Please note that this interface is unstable and may significantly change.''' @@ -31,14 +44,14 @@ import java.util.concurrent.CompletionStage * The following methods are currently available for changing the state of a Daml ledger: * - submitting a transaction using [[WriteService!.submitTransaction]] * - allocating a new party using [[WritePartyService!.allocateParty]] - * - uploading a new package using [[WritePackagesService!.uploadDar]] * - pruning a participant ledger using [[WriteParticipantPruningService!.prune]] */ trait WriteService extends WritePackagesService with WritePartyService with WriteParticipantPruningService - with ReportsHealth { + with ReportsHealth + with InternalStateServiceProvider { /** Submit a transaction for acceptance to the ledger. * @@ -147,4 +160,65 @@ trait WriteService )(implicit traceContext: TraceContext ): CompletionStage[SubmissionResult] + + def getConnectedDomains(request: ConnectedDomainRequest)(implicit + traceContext: TraceContext + ): Future[ConnectedDomainResponse] = + throw new UnsupportedOperationException() + + /** Get the offsets of the incomplete assigned/unassigned events for a set of stakeholders. + * + * @param validAt The offset of validity in participant offset terms. + * @param stakeholders Only offsets are returned which have at least one stakeholder from this set. + * @return All the offsets of assigned/unassigned events which do not have their counterparts visible at + * the validAt offset, and only for the reassignments for which this participant is reassigning.
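With getConnectedDomains relocated, callers now reach it through WriteService and the request/response types in its companion (defined below). A hedged usage sketch; writeService, party, and an implicit ExecutionContext are assumptions:

    // Sketch: list the domains a party's participant is connected to.
    val connectedF =
      writeService.getConnectedDomains(WriteService.ConnectedDomainRequest(party))
    connectedF.map(_.connectedDomains.map(d => d.domainAlias -> d.permission))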
+ */ + def incompleteReassignmentOffsets( + validAt: Offset, + stakeholders: Set[LfPartyId], + )(implicit traceContext: TraceContext): Future[Vector[Offset]] = { + val _ = validAt + val _ = stakeholders + val _ = traceContext + Future.successful(Vector.empty) + } + + def getPackageMetadataSnapshot(implicit + contextualizedErrorLogger: ContextualizedErrorLogger + ): PackageMetadata = + throw new UnsupportedOperationException() + + def listLfPackages()(implicit + traceContext: TraceContext + ): Future[Seq[PackageDescription]] = + throw new UnsupportedOperationException() + + def getLfArchive(packageId: PackageId)(implicit + traceContext: TraceContext + ): Future[Option[Archive]] = + throw new UnsupportedOperationException() + + def validateDar( + dar: ByteString, + darName: String, + )(implicit + traceContext: TraceContext + ): Future[SubmissionResult] = + throw new UnsupportedOperationException() +} + +object WriteService { + final case class ConnectedDomainRequest(party: LfPartyId) + + final case class ConnectedDomainResponse( + connectedDomains: Seq[ConnectedDomainResponse.ConnectedDomain] + ) + + object ConnectedDomainResponse { + final case class ConnectedDomain( + domainAlias: DomainAlias, + domainId: DomainId, + permission: ParticipantPermission, + ) + } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedReadService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedReadService.scala index cf617d347..42337fc65 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedReadService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedReadService.scala @@ -3,29 +3,15 @@ package com.digitalasset.canton.ledger.participant.state.metrics -import com.daml.daml_lf_dev.DamlLf.Archive -import com.daml.error.ContextualizedErrorLogger -import com.daml.lf.data.Ref.PackageId import com.daml.metrics.Timed -import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.health.HealthStatus -import com.digitalasset.canton.ledger.participant.state.{ - InternalStateService, - ReadService, - SubmissionResult, - Update, -} +import com.digitalasset.canton.ledger.participant.state.{ReadService, Update} import com.digitalasset.canton.metrics.LedgerApiServerMetrics -import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata -import com.digitalasset.canton.protocol.PackageDescription import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.google.protobuf.ByteString import org.apache.pekko.NotUsed import org.apache.pekko.stream.scaladsl.Source -import scala.concurrent.Future - final class TimedReadService(delegate: ReadService, metrics: LedgerApiServerMetrics) extends ReadService { @@ -34,60 +20,6 @@ final class TimedReadService(delegate: ReadService, metrics: LedgerApiServerMetr )(implicit traceContext: TraceContext): Source[(Offset, Traced[Update]), NotUsed] = Timed.source(metrics.services.read.stateUpdates, delegate.stateUpdates(beginAfter)) - override def getConnectedDomains( - request: ReadService.ConnectedDomainRequest - )(implicit traceContext: TraceContext): Future[ReadService.ConnectedDomainResponse] = - Timed.future( - metrics.services.read.getConnectedDomains, - delegate.getConnectedDomains(request), - ) - - override def 
incompleteReassignmentOffsets(validAt: Offset, stakeholders: Set[LfPartyId])(implicit - traceContext: TraceContext - ): Future[Vector[Offset]] = - Timed.future( - metrics.services.read.getConnectedDomains, - delegate.incompleteReassignmentOffsets(validAt, stakeholders), - ) - override def currentHealth(): HealthStatus = delegate.currentHealth() - - override def registerInternalStateService(internalStateService: InternalStateService): Unit = - delegate.registerInternalStateService(internalStateService) - - override def internalStateService: Option[InternalStateService] = - delegate.internalStateService - - override def unregisterInternalStateService(): Unit = - delegate.unregisterInternalStateService() - - override def getPackageMetadataSnapshot(implicit - contextualizedErrorLogger: ContextualizedErrorLogger - ): PackageMetadata = - delegate.getPackageMetadataSnapshot - - override def listLfPackages()(implicit - traceContext: TraceContext - ): Future[Seq[PackageDescription]] = - Timed.future( - metrics.services.read.listLfPackages, - delegate.listLfPackages(), - ) - - override def getLfArchive( - packageId: PackageId - )(implicit traceContext: TraceContext): Future[Option[Archive]] = - Timed.future( - metrics.services.read.getLfArchive, - delegate.getLfArchive(packageId), - ) - - override def validateDar(dar: ByteString, darName: String)(implicit - traceContext: TraceContext - ): Future[SubmissionResult] = - Timed.future( - metrics.services.read.validateDar, - delegate.validateDar(dar, darName), - ) } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedWriteService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedWriteService.scala index d3795cdc8..1af3c567c 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedWriteService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/participant/state/metrics/TimedWriteService.scala @@ -3,14 +3,24 @@ package com.digitalasset.canton.ledger.participant.state.metrics +import com.daml.daml_lf_dev.DamlLf.Archive +import com.daml.error.ContextualizedErrorLogger +import com.daml.lf.data.Ref.PackageId import com.daml.lf.data.{ImmArray, Ref} import com.daml.lf.transaction.{GlobalKey, SubmittedTransaction} import com.daml.lf.value.Value import com.daml.metrics.Timed +import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.data.{Offset, ProcessedDisclosedContract} import com.digitalasset.canton.ledger.api.health.HealthStatus +import com.digitalasset.canton.ledger.participant.state.WriteService.{ + ConnectedDomainRequest, + ConnectedDomainResponse, +} import com.digitalasset.canton.ledger.participant.state.* import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata +import com.digitalasset.canton.protocol.PackageDescription import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.TraceContext import com.google.protobuf.ByteString @@ -104,4 +114,58 @@ final class TimedWriteService(delegate: WriteService, metrics: LedgerApiServerMe override def currentHealth(): HealthStatus = delegate.currentHealth() + + override def getConnectedDomains( + request: ConnectedDomainRequest + )(implicit traceContext: TraceContext): Future[ConnectedDomainResponse] = + Timed.future( + 
metrics.services.read.getConnectedDomains, + delegate.getConnectedDomains(request), + ) + + override def incompleteReassignmentOffsets(validAt: Offset, stakeholders: Set[LfPartyId])(implicit + traceContext: TraceContext + ): Future[Vector[Offset]] = + Timed.future( + metrics.services.read.getConnectedDomains, + delegate.incompleteReassignmentOffsets(validAt, stakeholders), + ) + + override def registerInternalStateService(internalStateService: InternalStateService): Unit = + delegate.registerInternalStateService(internalStateService) + + override def internalStateService: Option[InternalStateService] = + delegate.internalStateService + + override def unregisterInternalStateService(): Unit = + delegate.unregisterInternalStateService() + + override def getPackageMetadataSnapshot(implicit + contextualizedErrorLogger: ContextualizedErrorLogger + ): PackageMetadata = + delegate.getPackageMetadataSnapshot + + override def listLfPackages()(implicit + traceContext: TraceContext + ): Future[Seq[PackageDescription]] = + Timed.future( + metrics.services.read.listLfPackages, + delegate.listLfPackages(), + ) + + override def getLfArchive( + packageId: PackageId + )(implicit traceContext: TraceContext): Future[Option[Archive]] = + Timed.future( + metrics.services.read.getLfArchive, + delegate.getLfArchive(packageId), + ) + + override def validateDar(dar: ByteString, darName: String)(implicit + traceContext: TraceContext + ): Future[SubmissionResult] = + Timed.future( + metrics.services.read.validateDar, + delegate.validateDar(dar, darName), + ) } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala index 350d494bb..37a8e4a53 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/InMemoryState.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform import com.daml.ledger.resources.ResourceOwner import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.apiserver.services.tracking.SubmissionTracker import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd import com.digitalasset.canton.platform.store.cache.{ @@ -32,6 +33,7 @@ private[platform] class InMemoryState( val stringInterningView: StringInterningView, val dispatcherState: DispatcherState, val submissionTracker: SubmissionTracker, + val commandProgressTracker: CommandProgressTracker, val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) extends NamedLogging { @@ -70,6 +72,7 @@ private[platform] class InMemoryState( object InMemoryState { def owner( + commandProgressTracker: CommandProgressTracker, apiStreamShutdownTimeout: Duration, bufferedStreamsPageSize: Int, maxContractStateCacheSize: Long, @@ -112,6 +115,7 @@ object InMemoryState { ), stringInterningView = new StringInterningView(loggerFactory), submissionTracker = submissionTracker, + commandProgressTracker = commandProgressTracker, loggerFactory = loggerFactory, )(executionContext) } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/LedgerApiServer.scala 
b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/LedgerApiServer.scala index df401917d..e06e6fe2e 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/LedgerApiServer.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/LedgerApiServer.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform import com.daml.ledger.resources.ResourceOwner import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.config.IndexServiceConfig import com.digitalasset.canton.platform.index.InMemoryStateUpdater import com.digitalasset.canton.tracing.TraceContext @@ -15,6 +16,7 @@ import scala.concurrent.ExecutionContext object LedgerApiServer { def createInMemoryStateAndUpdater( + commandProgressTracker: CommandProgressTracker, indexServiceConfig: IndexServiceConfig, maxCommandsInFlight: Int, metrics: LedgerApiServerMetrics, @@ -26,6 +28,7 @@ object LedgerApiServer { ): ResourceOwner[(InMemoryState, InMemoryStateUpdater.UpdaterFlow)] = { for { inMemoryState <- InMemoryState.owner( + commandProgressTracker = commandProgressTracker, apiStreamShutdownTimeout = indexServiceConfig.apiStreamShutdownTimeout, bufferedStreamsPageSize = indexServiceConfig.bufferedStreamsPageSize, maxContractStateCacheSize = indexServiceConfig.maxContractStateCacheSize, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/ResourceCloseable.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/ResourceCloseable.scala new file mode 100644 index 000000000..05c1628ef --- /dev/null +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/ResourceCloseable.scala @@ -0,0 +1,46 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform + +import com.daml.ledger.resources.Resource +import com.digitalasset.canton.lifecycle.{AsyncCloseable, AsyncOrSyncCloseable, FlagCloseableAsync} +import com.digitalasset.canton.logging.NamedLogging +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.blocking + +@SuppressWarnings(Array("org.wartremover.warts.Var")) +abstract class ResourceCloseable extends FlagCloseableAsync with NamedLogging { + private var closeableResource: Option[AsyncCloseable] = None + + override protected def closeAsync(): Seq[AsyncOrSyncCloseable] = blocking(synchronized { + List( + closeableResource.getOrElse( + throw new IllegalStateException( + "Programming error: resource not registered. Please use ResourceOwnerOps.toCloseable." + ) + ) + ) + }) + + def registerResource(resource: Resource[?], name: String)(implicit + traceContext: TraceContext + ): this.type = blocking(synchronized { + this.closeableResource.foreach(_ => + throw new IllegalStateException( + "Programming error: resource registered multiple times. Please use ResourceOwnerFlagCloseableOps.acquireFlagCloseable." 
+ ) + ) + this.closeableResource = Some( + AsyncCloseable( + name = name, + closeFuture = resource.release(), + timeout = timeouts.shutdownNetwork, + onTimeout = err => + logger.warn(s"Resource $name failed to close within ${timeouts.shutdownNetwork}.", err), + ) + ) + this + }) +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala index 12e4c0a49..0a645b5cf 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala @@ -22,7 +22,6 @@ import com.digitalasset.canton.ledger.localstore.api.{ UserManagementStore, } import com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.ReadService import com.digitalasset.canton.ledger.participant.state.index.IndexService import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory} import com.digitalasset.canton.metrics.LedgerApiServerMetrics @@ -31,6 +30,7 @@ import com.digitalasset.canton.platform.apiserver.configuration.EngineLoggingCon import com.digitalasset.canton.platform.apiserver.execution.StoreBackedCommandExecutor.AuthenticateContract import com.digitalasset.canton.platform.apiserver.execution.{ AuthorityResolver, + CommandProgressTracker, DynamicDomainParameterGetter, } import com.digitalasset.canton.platform.apiserver.meteringreport.MeteringReportKey @@ -75,12 +75,12 @@ object ApiServiceOwner { // objects indexService: IndexService, submissionTracker: SubmissionTracker, + commandProgressTracker: CommandProgressTracker, userManagementStore: UserManagementStore, identityProviderConfigStore: IdentityProviderConfigStore, partyRecordStore: PartyRecordStore, command: CommandServiceConfig = ApiServiceOwner.DefaultCommandServiceConfig, - optWriteService: Option[state.WriteService], - readService: ReadService, + writeService: state.WriteService, healthChecks: HealthChecks, metrics: LedgerApiServerMetrics, timeServiceBackend: Option[TimeServiceBackend] = None, @@ -138,8 +138,7 @@ object ApiServiceOwner { executionSequencerFactory <- new ExecutionSequencerFactoryOwner() apiServicesOwner = new ApiServices.Owner( participantId = participantId, - optWriteService = optWriteService, - readService = readService, + writeService = writeService, indexService = indexService, authorizer = authorizer, engine = engine, @@ -151,6 +150,7 @@ object ApiServiceOwner { ), submissionTracker = submissionTracker, initSyncTimeout = initSyncTimeout.underlying, + commandProgressTracker = commandProgressTracker, commandConfig = command, optTimeServiceBackend = timeServiceBackend, servicesExecutionContext = servicesExecutionContext, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala index 4e3ab6f15..f24e563f8 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServices.scala @@ -22,7 +22,6 @@ import com.digitalasset.canton.ledger.localstore.api.{ UserManagementStore, } import 
com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.ReadService import com.digitalasset.canton.ledger.participant.state.index.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.LedgerApiServerMetrics @@ -36,6 +35,7 @@ import com.digitalasset.canton.platform.apiserver.meteringreport.MeteringReportK import com.digitalasset.canton.platform.apiserver.services.* import com.digitalasset.canton.platform.apiserver.services.admin.* import com.digitalasset.canton.platform.apiserver.services.command.{ + CommandInspectionServiceImpl, CommandServiceImpl, CommandSubmissionServiceImpl, } @@ -74,8 +74,7 @@ object ApiServices { final class Owner( participantId: Ref.ParticipantId, - optWriteService: Option[state.WriteService], - readService: ReadService, + writeService: state.WriteService, indexService: IndexService, userManagementStore: UserManagementStore, identityProviderConfigStore: IdentityProviderConfigStore, @@ -87,6 +86,7 @@ object ApiServices { timeProviderType: TimeProviderType, submissionTracker: SubmissionTracker, initSyncTimeout: FiniteDuration, + commandProgressTracker: CommandProgressTracker, commandConfig: CommandServiceConfig, optTimeServiceBackend: Option[TimeServiceBackend], servicesExecutionContext: ExecutionContext, @@ -128,7 +128,7 @@ object ApiServices { ) override def acquire()(implicit context: ResourceContext): Resource[ApiServices] = { - implicit val traceContext = TraceContext.empty + implicit val traceContext: TraceContext = TraceContext.empty logger.info(engine.info.toString) for { services <- Resource { @@ -170,6 +170,19 @@ object ApiServices { partyValidator = new PartyValidator(PartyNameChecker.AllowAllParties) ) + val apiInspectionServiceOpt = + Option + .when(ledgerFeatures.commandInspectionService.supported)( + new CommandInspectionServiceAuthorization( + CommandInspectionServiceImpl.createApiService( + commandProgressTracker, + telemetry, + loggerFactory, + ), + authorizer, + ) + ) + val (ledgerApiV2Services, ledgerApiUpdateService) = { val apiTimeServiceOpt = optTimeServiceBackend.map(tsb => @@ -186,7 +199,7 @@ object ApiServices { ) val apiEventQueryService = new ApiEventQueryService(eventQueryService, telemetry, loggerFactory) - val apiPackageService = new ApiPackageService(readService, telemetry, loggerFactory) + val apiPackageService = new ApiPackageService(writeService, telemetry, loggerFactory) val apiUpdateService = new ApiUpdateService( transactionsService, @@ -198,7 +211,7 @@ object ApiServices { val apiStateService = new ApiStateService( acsService = activeContractsService, - readService = readService, + writeService = writeService, txService = transactionsService, metrics = metrics, telemetry = telemetry, @@ -277,6 +290,7 @@ object ApiServices { ) ledgerApiV2Services ::: + apiInspectionServiceOpt.toList ::: writeServiceBackedApiServices ::: List( apiReflectionService, @@ -291,121 +305,118 @@ object ApiServices { )(implicit executionContext: ExecutionContext ): List[BindableService] = { - optWriteService.toList.flatMap { writeService => - val commandExecutor = new TimedCommandExecutor( - new LedgerTimeAwareCommandExecutor( - new StoreBackedCommandExecutor( - engine, - participantId, - readService, - contractStore, - authorityResolver, - authenticateContract, - metrics, - engineLoggingConfig, - loggerFactory, - dynParamGetter, - timeProvider, - ), - new ResolveMaximumLedgerTime(maximumLedgerTimeService, loggerFactory), - maxRetries = 3, + val 
commandExecutor = new TimedCommandExecutor( + new LedgerTimeAwareCommandExecutor( + new StoreBackedCommandExecutor( + engine, + participantId, + writeService, + contractStore, + authorityResolver, + authenticateContract, metrics, + engineLoggingConfig, loggerFactory, + dynParamGetter, + timeProvider, ), + new ResolveMaximumLedgerTime(maximumLedgerTimeService, loggerFactory), + maxRetries = 3, metrics, - ) + loggerFactory, + ), + metrics, + ) - val validateUpgradingPackageResolutions = - new ValidateUpgradingPackageResolutionsImpl( - getPackageMetadataSnapshot = readService.getPackageMetadataSnapshot(_) - ) - val commandsValidator = new CommandsValidator( - validateUpgradingPackageResolutions = validateUpgradingPackageResolutions + val validateUpgradingPackageResolutions = + new ValidateUpgradingPackageResolutionsImpl( + getPackageMetadataSnapshot = writeService.getPackageMetadataSnapshot(_) ) - val commandSubmissionService = - CommandSubmissionServiceImpl.createApiService( - writeService, - commandsValidator, - timeProvider, - timeProviderType, - seedService, - commandExecutor, - checkOverloaded, - metrics, - telemetry, - loggerFactory, - ) - - val apiPartyManagementService = ApiPartyManagementService.createApiService( - partyManagementService, - new IdentityProviderExists(identityProviderConfigStore), - partyManagementServiceConfig.maxPartiesPageSize, - partyRecordStore, - transactionsService, + val commandsValidator = new CommandsValidator( + validateUpgradingPackageResolutions = validateUpgradingPackageResolutions + ) + val commandSubmissionService = + CommandSubmissionServiceImpl.createApiService( writeService, - managementServiceTimeout, - telemetry = telemetry, - loggerFactory = loggerFactory, - ) - - val apiPackageManagementService = - ApiPackageManagementService.createApiService( - readService = readService, - writeBackend = writeService, - telemetry = telemetry, - loggerFactory = loggerFactory, - ) - - val participantPruningService = ApiParticipantPruningService.createApiService( - indexService, - writeService, - readService, + commandsValidator, + timeProvider, + timeProviderType, + seedService, + commandExecutor, + checkOverloaded, metrics, telemetry, loggerFactory, ) - val ledgerApiV2Services = ledgerApiV2Enabled.toList.flatMap { apiUpdateService => - val apiSubmissionService = new ApiCommandSubmissionService( - commandsValidator = commandsValidator, - commandSubmissionService = commandSubmissionService, - writeService = writeService, - currentLedgerTime = () => timeProvider.getCurrentTime, - currentUtcTime = () => Instant.now, - maxDeduplicationDuration = maxDeduplicationDuration.asJava, - submissionIdGenerator = SubmissionIdGenerator.Random, - metrics = metrics, - telemetry = telemetry, - loggerFactory = loggerFactory, - ) - val apiCommandService = CommandServiceImpl.createApiService( - commandsValidator = commandsValidator, - submissionTracker = submissionTracker, - // Using local services skips the gRPC layer, improving performance. 
- submit = apiSubmissionService.submitWithTraceContext, - defaultTrackingTimeout = commandConfig.defaultTrackingTimeout, - transactionServices = new CommandServiceImpl.TransactionServices( - getTransactionTreeById = apiUpdateService.getTransactionTreeById, - getTransactionById = apiUpdateService.getTransactionById, - ), - timeProvider = timeProvider, - maxDeduplicationDuration = maxDeduplicationDuration, - telemetry = telemetry, - loggerFactory = loggerFactory, - ) + val apiPartyManagementService = ApiPartyManagementService.createApiService( + partyManagementService, + new IdentityProviderExists(identityProviderConfigStore), + partyManagementServiceConfig.maxPartiesPageSize, + partyRecordStore, + transactionsService, + writeService, + managementServiceTimeout, + telemetry = telemetry, + loggerFactory = loggerFactory, + ) + + val apiPackageManagementService = + ApiPackageManagementService.createApiService( + writeService = writeService, + telemetry = telemetry, + loggerFactory = loggerFactory, + ) - List( - new CommandSubmissionServiceAuthorization(apiSubmissionService, authorizer), - new CommandServiceAuthorization(apiCommandService, authorizer), - ) - } + val participantPruningService = ApiParticipantPruningService.createApiService( + indexService, + writeService, + metrics, + telemetry, + loggerFactory, + ) + + val ledgerApiV2Services = ledgerApiV2Enabled.toList.flatMap { apiUpdateService => + val apiSubmissionService = new ApiCommandSubmissionService( + commandsValidator = commandsValidator, + commandSubmissionService = commandSubmissionService, + writeService = writeService, + currentLedgerTime = () => timeProvider.getCurrentTime, + currentUtcTime = () => Instant.now, + maxDeduplicationDuration = maxDeduplicationDuration.asJava, + submissionIdGenerator = SubmissionIdGenerator.Random, + tracker = commandProgressTracker, + metrics = metrics, + telemetry = telemetry, + loggerFactory = loggerFactory, + ) + val apiCommandService = CommandServiceImpl.createApiService( + commandsValidator = commandsValidator, + submissionTracker = submissionTracker, + // Using local services skips the gRPC layer, improving performance. 
+ submit = apiSubmissionService.submitWithTraceContext, + defaultTrackingTimeout = commandConfig.defaultTrackingTimeout, + transactionServices = new CommandServiceImpl.TransactionServices( + getTransactionTreeById = apiUpdateService.getTransactionTreeById, + getTransactionById = apiUpdateService.getTransactionById, + ), + timeProvider = timeProvider, + maxDeduplicationDuration = maxDeduplicationDuration, + telemetry = telemetry, + loggerFactory = loggerFactory, + ) List( - new PartyManagementServiceAuthorization(apiPartyManagementService, authorizer), - new PackageManagementServiceAuthorization(apiPackageManagementService, authorizer), - new ParticipantPruningServiceAuthorization(participantPruningService, authorizer), - ) ::: ledgerApiV2Services + new CommandSubmissionServiceAuthorization(apiSubmissionService, authorizer), + new CommandServiceAuthorization(apiCommandService, authorizer), + ) } + + List( + new PartyManagementServiceAuthorization(apiPartyManagementService, authorizer), + new PackageManagementServiceAuthorization(apiPackageManagementService, authorizer), + new ParticipantPruningServiceAuthorization(participantPruningService, authorizer), + ) ::: ledgerApiV2Services } } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/LedgerFeatures.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/LedgerFeatures.scala index c9c94fbce..a8c6a3b0b 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/LedgerFeatures.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/LedgerFeatures.scala @@ -3,6 +3,10 @@ package com.digitalasset.canton.platform.apiserver +import com.daml.ledger.api.v2.experimental_features.ExperimentalCommandInspectionService + final case class LedgerFeatures( - staticTime: Boolean = false + staticTime: Boolean = false, + commandInspectionService: ExperimentalCommandInspectionService = + ExperimentalCommandInspectionService(supported = true), ) diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/CommandProgressTracker.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/CommandProgressTracker.scala new file mode 100644 index 000000000..1f310f496 --- /dev/null +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/CommandProgressTracker.scala @@ -0,0 +1,228 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
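The `commandInspectionService` flag added to `LedgerFeatures` above is consulted twice: `ApiServices` binds the inspection endpoint only when it is supported, and `ApiVersionService` advertises the same value to clients. A minimal sketch of that gating (illustrative only; nothing below is added by the patch itself):

```scala
import com.daml.ledger.api.v2.experimental_features.ExperimentalCommandInspectionService
import com.digitalasset.canton.platform.apiserver.LedgerFeatures

// The default constructed by this patch enables the experimental service.
val features = LedgerFeatures(
  staticTime = false,
  commandInspectionService = ExperimentalCommandInspectionService(supported = true),
)

// ApiServices gates endpoint binding on exactly this boolean (via Option.when),
// and ApiVersionService returns the message verbatim in ExperimentalFeatures.
val bindInspectionEndpoint: Boolean = features.commandInspectionService.supported
```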
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.apiserver.execution + +import com.daml.error.utils.DecodedCantonError +import com.daml.ledger.api.v2.admin.command_inspection_service.GetCommandStatusResponse.CommandStatus.{ + CommandUpdates, + RequestStatistics, +} +import com.daml.ledger.api.v2.admin.command_inspection_service.{ + CommandState, + GetCommandStatusResponse, +} +import com.daml.ledger.api.v2.commands.Command +import com.daml.ledger.api.v2.completion.Completion +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import io.grpc.StatusRuntimeException + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.Failure +import scala.util.control.NonFatal + +final case class CommandStatus( + started: CantonTimestamp, + completed: Option[CantonTimestamp], + completion: Completion, + state: CommandState, + commands: Seq[Command], + requestStatistics: RequestStatistics, + updates: CommandUpdates, +) extends PrettyPrinting { + def toProto: GetCommandStatusResponse.CommandStatus = { + GetCommandStatusResponse.CommandStatus( + started = Some(started.toProtoTimestamp), + completed = completed.map(_.toProtoTimestamp), + completion = Some(completion), + state = state, + commands = commands, + requestStatistics = Some(requestStatistics), + updates = Some(updates), + ) + } + + def decodedError: Option[DecodedCantonError] = + completion.status.flatMap(s => DecodedCantonError.fromGrpcStatus(s).toOption) + + private implicit val prettyRequestStats: Pretty[RequestStatistics] = prettyOfClass( + param("requestSize", _.requestSize), + param("recipients", _.recipients), + param("envelopes", _.envelopes), + ) + + private implicit val prettyUpdateStats: Pretty[CommandUpdates] = prettyOfClass( + param("created", _.created.length), + param("archived", _.archived.length), + param("exercised", _.exercised), + param("fetched", _.fetched), + param("lookedUpByKey", _.lookedUpByKey), + ) + + private def nonEmptyUpdate(update: CommandUpdates): Boolean = { + update.created.nonEmpty || update.archived.nonEmpty || update.exercised > 0 || update.fetched > 0 || update.lookedUpByKey > 0 + } + + override lazy val pretty: Pretty[CommandStatus] = prettyOfClass( + param("commandId", _.completion.commandId.singleQuoted), + param("started", _.started), + paramIfDefined("completed", _.completed), + param("state", _.state.toString().singleQuoted), + param("completion", _.completion.status), + paramIfDefined( + "transactionId", + x => Option.when(x.completion.updateId.nonEmpty)(x.completion.updateId.singleQuoted), + ), + paramIfDefined( + "request", + x => Option.when(x.requestStatistics.requestSize > 0)(x.requestStatistics), + ), + paramIfDefined( + "update", + x => Option.when(nonEmptyUpdate(x.updates))(x.updates), + ), + ) + +} + +object CommandStatus { + def fromProto( + proto: GetCommandStatusResponse.CommandStatus + ): Either[ProtoDeserializationError, CommandStatus] = { + val GetCommandStatusResponse.CommandStatus( + startedP, + completedP, + completionP, + stateP, + commandsP, + requestStatisticsP, + updatesP, + ) = proto + for { + started <- ProtoConverter.parseRequired( + CantonTimestamp.fromProtoTimestamp, + "started", + startedP, + ) + 
completed <- completedP
+        .map(CantonTimestamp.fromProtoTimestamp(_).map(Some(_)))
+        .getOrElse(Right(None))
+      completion <- ProtoConverter.required("completion", completionP)
+      requestsStatistics <- ProtoConverter.required("requestStatistics", requestStatisticsP)
+      updates <- ProtoConverter.required("updates", updatesP)
+    } yield CommandStatus(
+      started = started,
+      completed = completed,
+      completion = completion,
+      state = stateP,
+      commands = commandsP,
+      requestStatistics = requestsStatistics,
+      updates = updates,
+    )
+  }
+}
+
+/** Result handle that allows updating a command with its result */
+trait CommandResultHandle {
+
+  def failedSync(err: StatusRuntimeException): Unit
+  def internalErrorSync(err: Throwable): Unit
+
+  def extractFailure[T](f: Future[T])(implicit executionContext: ExecutionContext): Future[T] = {
+    f.transform {
+      case ff @ Failure(err: StatusRuntimeException) =>
+        failedSync(err)
+        ff
+      case ff @ Failure(NonFatal(err)) =>
+        internalErrorSync(err)
+        ff
+      case rr => rr
+    }
+  }
+
+  def recordEnvelopeSizes(batchSize: Int, numRecipients: Int, numEnvelopes: Int): Unit
+
+  def recordTransactionImpact(
+      transaction: com.daml.lf.transaction.SubmittedTransaction
+  ): Unit
+
+}
+
+object CommandResultHandle {
+  lazy val NoOp: CommandResultHandle = new CommandResultHandle {
+    override def failedSync(err: StatusRuntimeException): Unit = ()
+    override def internalErrorSync(err: Throwable): Unit = ()
+    override def recordEnvelopeSizes(batchSize: Int, numRecipients: Int, numEnvelopes: Int): Unit =
+      ()
+    override def recordTransactionImpact(
+        transaction: com.daml.lf.transaction.SubmittedTransaction
+    ): Unit = ()
+  }
+}
+
+/** Command progress tracker for debugging
+  *
+  * In order to track the progress of a command, we internally update the progress of the command
+  * using this tracker trait and expose the information on the API.
+  *
+  * This is in total violation of the CQRS pattern, but it is a necessary evil for debugging.
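+  *
+  * Expected call pattern, sketched (`tracker` and `submit` are placeholders, not part of
+  * this change):
+  * {{{
+  *   val handle = tracker.registerCommand(
+  *     commandId = "cmd-1",
+  *     submissionId = None,
+  *     applicationId = "app-1",
+  *     commands = Seq.empty,
+  *     actAs = Set("alice"),
+  *   )
+  *   // failures of the submission future are mirrored into the tracked status
+  *   handle.extractFailure(submit(request))
+  * }}}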
+ */ +trait CommandProgressTracker { + + def findCommandStatus( + commandIdPrefix: String, + state: CommandState, + limit: Int, + ): Future[Seq[CommandStatus]] + + def registerCommand( + commandId: String, + submissionId: Option[String], + applicationId: String, + commands: Seq[Command], + actAs: Set[String], + )(implicit traceContext: TraceContext): CommandResultHandle + + def findHandle( + commandId: String, + applicationId: String, + actAs: Seq[String], + submissionId: Option[String], + ): CommandResultHandle + + def processLedgerUpdate(update: Traced[TransactionLogUpdate]): Unit + +} + +object CommandProgressTracker { + lazy val NoOp: CommandProgressTracker = new CommandProgressTracker { + override def findCommandStatus( + commandId: String, + state: CommandState, + limit: Int, + ): Future[Seq[CommandStatus]] = Future.successful(Seq.empty) + + override def registerCommand( + commandId: String, + submissionId: Option[String], + applicationId: String, + commands: Seq[Command], + actAs: Set[String], + )(implicit traceContext: TraceContext): CommandResultHandle = CommandResultHandle.NoOp + + override def findHandle( + commandId: String, + applicationId: String, + actAs: Seq[String], + submissionId: Option[String], + ): CommandResultHandle = + CommandResultHandle.NoOp + + override def processLedgerUpdate(update: Traced[TransactionLogUpdate]): Unit = () + } +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutor.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutor.scala index 38ff960db..2410e8960 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutor.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutor.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.data.{CantonTimestamp, ProcessedDisclosedContract import com.digitalasset.canton.ledger.api.domain.{Commands as ApiCommands, DisclosedContract} import com.digitalasset.canton.ledger.api.util.TimeProvider import com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.ReadService +import com.digitalasset.canton.ledger.participant.state.WriteService import com.digitalasset.canton.ledger.participant.state.index.{ContractState, ContractStore} import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.{LoggingContextWithTrace, NamedLoggerFactory, NamedLogging} @@ -45,7 +45,7 @@ import scala.concurrent.{ExecutionContext, Future} private[apiserver] final class StoreBackedCommandExecutor( engine: Engine, participant: Ref.ParticipantId, - readService: ReadService, + writeService: WriteService, contractStore: ContractStore, authorityResolver: AuthorityResolver, authenticateContract: AuthenticateContract, @@ -256,7 +256,7 @@ private[apiserver] final class StoreBackedCommandExecutor( packageLoader .loadPackage( packageId = packageId, - delegate = readService.getLfArchive(_)(loggingContext.traceContext), + delegate = writeService.getLfArchive(_)(loggingContext.traceContext), metric = metrics.execution.getLfPackage, ) .flatMap { maybePackage => diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionService.scala 
b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionService.scala index 25293f877..5158f5196 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionService.scala @@ -12,6 +12,7 @@ import com.daml.ledger.api.v2.command_submission_service.{ SubmitRequest, SubmitResponse, } +import com.daml.ledger.api.v2.commands.Commands import com.daml.metrics.Timed import com.daml.scalautil.future.FutureConversion.CompletionStageConversionOps import com.daml.tracing.{SpanAttribute, Telemetry, TelemetryContext} @@ -30,6 +31,10 @@ import com.digitalasset.canton.logging.{ NamedLogging, } import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.apiserver.execution.{ + CommandProgressTracker, + CommandResultHandle, +} import com.digitalasset.canton.tracing.Traced import com.digitalasset.canton.util.OptionUtil @@ -45,6 +50,7 @@ final class ApiCommandSubmissionService( currentUtcTime: () => Instant, maxDeduplicationDuration: Duration, submissionIdGenerator: SubmissionIdGenerator, + tracker: CommandProgressTracker, metrics: LedgerApiServerMetrics, telemetry: Telemetry, val loggerFactory: NamedLoggerFactory, @@ -72,7 +78,34 @@ final class ApiCommandSubmissionService( loggingContextWithTrace, requestWithSubmissionId.commands.map(_.submissionId), ) - Timed.timedAndTrackedFuture( + val resultHandle = requestWithSubmissionId.commands + .map { + case allCommands @ Commands( + workflowId, + applicationId, + commandId, + commands, + deduplicationPeriod, + minLedgerTimeAbs, + minLedgerTimeRel, + actAs, + readAs, + submissionId, + disclosedContracts, + domainId, + packageIdSelectionPreference, + ) => + tracker.registerCommand( + commandId, + Option.when(submissionId.nonEmpty)(submissionId), + applicationId, + commands, + actAs = allCommands.actAs.toSet, + )(loggingContextWithTrace.traceContext) + } + .getOrElse(CommandResultHandle.NoOp) + + val result = Timed.timedAndTrackedFuture( metrics.commands.submissions, metrics.commands.submissionsRunning, Timed @@ -94,6 +127,7 @@ final class ApiCommandSubmissionService( commandSubmissionService.submit(_).map(_ => SubmitResponse()), ), ) + resultHandle.extractFailure(result) } override def submitReassignment( diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala index c03d002a0..8c2e52497 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiPackageService.scala @@ -25,7 +25,7 @@ import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.grpc.Logging.traceId import com.digitalasset.canton.ledger.api.validation.ValidationErrors import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors -import com.digitalasset.canton.ledger.participant.state.ReadService +import com.digitalasset.canton.ledger.participant.state.WriteService import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext 
import com.digitalasset.canton.logging.LoggingContextWithTrace.{ implicitExtractTraceContext, @@ -43,7 +43,7 @@ import io.grpc.ServerServiceDefinition import scala.concurrent.{ExecutionContext, Future} private[apiserver] final class ApiPackageService( - readService: ReadService, + writeService: WriteService, telemetry: Telemetry, val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext) @@ -62,7 +62,7 @@ private[apiserver] final class ApiPackageService( implicit val loggingContextWithTrace: LoggingContextWithTrace = LoggingContextWithTrace(loggerFactory, telemetry) logger.info(s"Received request to list packages: $request") - readService + writeService .listLfPackages() .map(p => ListPackagesResponse(p.map(_.packageId.toString))) .andThen(logger.logErrorsOnCall[ListPackagesResponse]) @@ -75,7 +75,7 @@ private[apiserver] final class ApiPackageService( ) { implicit loggingContext => logger.info(s"Received request for a package: $request") withValidatedPackageId(request.packageId, request) { packageId => - readService + writeService .getLfArchive(packageId) .flatMap { case None => @@ -103,7 +103,7 @@ private[apiserver] final class ApiPackageService( Future { val result = if ( - readService.getPackageMetadataSnapshot.packageIdVersionMap.keySet.contains(packageId) + writeService.getPackageMetadataSnapshot.packageIdVersionMap.keySet.contains(packageId) ) { PackageStatus.PACKAGE_STATUS_REGISTERED } else { diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala index c71dca103..9d94199b0 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiStateService.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.ledger.api.ValidationLogger import com.digitalasset.canton.ledger.api.grpc.{GrpcApiService, StreamingServiceLifecycleManagement} import com.digitalasset.canton.ledger.api.validation.{FieldValidator, TransactionFilterValidator} import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors -import com.digitalasset.canton.ledger.participant.state.ReadService +import com.digitalasset.canton.ledger.participant.state.WriteService import com.digitalasset.canton.ledger.participant.state.index.{ IndexActiveContractsService as ACSBackend, IndexTransactionsService, @@ -35,7 +35,7 @@ import scala.concurrent.{ExecutionContext, Future} final class ApiStateService( acsService: ACSBackend, - readService: ReadService, + writeService: WriteService, txService: IndexTransactionsService, metrics: LedgerApiServerMetrics, telemetry: Telemetry, @@ -109,8 +109,8 @@ final class ApiStateService( .fold( t => Future.failed(ValidationLogger.logFailureWithTrace(logger, request, t)), party => - readService - .getConnectedDomains(ReadService.ConnectedDomainRequest(party)) + writeService + .getConnectedDomains(WriteService.ConnectedDomainRequest(party)) .map(response => GetConnectedDomainsResponse( response.connectedDomains.flatMap { connectedDomain => diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala 
b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala index eaadd989b..4e5633474 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiUpdateService.scala @@ -3,8 +3,11 @@ package com.digitalasset.canton.platform.apiserver.services +import cats.data.OptionT import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.ledger.api.v2.transaction.Transaction import com.daml.ledger.api.v2.update_service.* +import com.daml.lf.data.Ref.Party import com.daml.lf.ledger.EventId import com.daml.logging.entries.LoggingEntries import com.daml.tracing.Telemetry @@ -271,20 +274,9 @@ final class ApiUpdateService( EventId .fromString(request.eventId.unwrap) .map { case EventId(transactionId, _) => - transactionsService - .getTransactionById(TransactionId(transactionId), request.requestingParties)( - loggingContextWithTrace - ) - .flatMap { - case None => - Future.failed( - RequestValidationErrors.NotFound.Transaction - .Reject(transactionId) - .asGrpcError - ) - case Some(transaction) => - Future.successful(transaction) - } + internalGetTransactionById(TransactionId(transactionId), request.requestingParties)( + loggingContextWithTrace + ) } .getOrElse { Future.failed { @@ -301,13 +293,15 @@ final class ApiUpdateService( override def getTransactionById( req: GetTransactionByIdRequest ): Future[GetTransactionResponse] = { - implicit val loggingContextWithTrace = LoggingContextWithTrace(loggerFactory, telemetry) - implicit val errorLoggingContext = ErrorLoggingContext(logger, loggingContextWithTrace) + val loggingContextWithTrace = LoggingContextWithTrace(loggerFactory, telemetry) + val errorLoggingContext = ErrorLoggingContext(logger, loggingContextWithTrace) validator - .validateTransactionById(req) + .validateTransactionById(req)(errorLoggingContext) .fold( - t => Future.failed(ValidationLogger.logFailureWithTrace(logger, req, t)), + t => + Future + .failed(ValidationLogger.logFailureWithTrace(logger, req, t)(loggingContextWithTrace)), request => { implicit val enrichedLoggingContext: LoggingContextWithTrace = LoggingContextWithTrace.enriched( @@ -317,20 +311,8 @@ final class ApiUpdateService( logger.info(s"Received request for transaction by ID, ${enrichedLoggingContext .serializeFiltered("eventId", "parties")}.")(loggingContextWithTrace.traceContext) logger.trace(s"Transaction by ID request: $request")(loggingContextWithTrace.traceContext) - transactionsService - .getTransactionById(request.transactionId, request.requestingParties)( - loggingContextWithTrace - ) - .flatMap { - case None => - Future.failed( - RequestValidationErrors.NotFound.Transaction - .Reject(request.transactionId.unwrap) - .asGrpcError - ) - case Some(transaction) => - Future.successful(transaction) - } + + internalGetTransactionById(request.transactionId, request.requestingParties) .andThen( logger.logErrorsOnCall[GetTransactionResponse](loggingContextWithTrace.traceContext) ) @@ -338,6 +320,48 @@ final class ApiUpdateService( ) } + private def internalGetTransactionById( + transactionId: TransactionId, + requestingParties: Set[Party], + )(implicit + loggingContextWithTrace: LoggingContextWithTrace + ): Future[GetTransactionResponse] = + OptionT(transactionsService.getTransactionById(transactionId, requestingParties)) + .orElse { + logger.debug( + s"Transaction not 
found in flat transaction lookup for transactionId $transactionId and requestingParties $requestingParties, falling back to transaction tree lookup."
+        )
+        // When a command submission completes successfully,
+        // the submitters can end up getting a TRANSACTION_NOT_FOUND when querying its corresponding flat transaction that either:
+        // * has only non-consuming events
+        // * has only events of contracts which have stakeholders that are not amongst the requestingParties
+        // In these situations, we fall back to a transaction tree lookup and populate the flat transaction response
+        // with its details but no events.
+        OptionT(transactionsService.getTransactionTreeById(transactionId, requestingParties))
+          .map(tree =>
+            GetTransactionResponse(
+              tree.transaction.map(transaction =>
+                Transaction(
+                  updateId = transaction.updateId,
+                  commandId = transaction.commandId,
+                  workflowId = transaction.workflowId,
+                  effectiveAt = transaction.effectiveAt,
+                  events = Seq.empty,
+                  offset = transaction.offset,
+                  domainId = transaction.domainId,
+                  traceContext = transaction.traceContext,
+                  recordTime = transaction.recordTime,
+                )
+              )
+            )
+          )
+      }
+      .getOrElseF(
+        Future.failed(
+          RequestValidationErrors.NotFound.Transaction.Reject(transactionId.unwrap).asGrpcError
+        )
+      )
+
   private def updatesLoggable(updates: GetUpdatesResponse): LoggingEntries =
     updates.update match {
       case GetUpdatesResponse.Update.Transaction(t) =>
diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala
index 3f8ade117..3cc0fc5d2 100644
--- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala
+++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/ApiVersionService.scala
@@ -73,7 +73,8 @@ private[apiserver] final class ApiVersionService(
         ),
         experimental = Some(
           ExperimentalFeatures.of(
-            staticTime = Some(ExperimentalStaticTime(supported = ledgerFeatures.staticTime))
+            staticTime = Some(ExperimentalStaticTime(supported = ledgerFeatures.staticTime)),
+            commandInspectionService = Some(ledgerFeatures.commandInspectionService),
           )
         ),
       )
diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiCommandInspectionService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiCommandInspectionService.scala
new file mode 100644
index 000000000..991fc9a79
--- /dev/null
+++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiCommandInspectionService.scala
@@ -0,0 +1,67 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
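The flat-then-tree fallback in `internalGetTransactionById` above is a plain `cats.data.OptionT` chain. Stripped of the Ledger API specifics it reduces to the following sketch (`Tx` and the parameter names are placeholders, not part of this change):

```scala
import cats.data.OptionT
import cats.implicits.*

import scala.concurrent.{ExecutionContext, Future}

// Run the primary lookup; only if it yields None, run the fallback;
// if both are empty, fail the returned future with the given error.
def lookupWithFallback[Tx](
    primary: Future[Option[Tx]],
    fallback: => Future[Option[Tx]],
    notFound: => Throwable,
)(implicit ec: ExecutionContext): Future[Tx] =
  OptionT(primary)
    .orElse(OptionT(fallback))
    .getOrElseF(Future.failed(notFound))
```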
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.platform.apiserver.services.admin + +import com.daml.error.ContextualizedErrorLogger +import com.daml.ledger.api.v2.admin.command_inspection_service.* +import com.daml.tracing.Telemetry +import com.digitalasset.canton.ledger.api.ValidationLogger +import com.digitalasset.canton.ledger.api.grpc.StreamingServiceLifecycleManagement +import com.digitalasset.canton.ledger.api.services.CommandInspectionService +import com.digitalasset.canton.ledger.api.validation.CommandInspectionServiceRequestValidator +import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext +import com.digitalasset.canton.logging.{ + ErrorLoggingContext, + LoggingContextWithTrace, + NamedLoggerFactory, + NamedLogging, +} +import com.digitalasset.canton.tracing.TraceContext + +import scala.concurrent.{ExecutionContext, Future} + +class ApiCommandInspectionService( + service: CommandInspectionService, + telemetry: Telemetry, + val loggerFactory: NamedLoggerFactory, +)(implicit + executionContext: ExecutionContext +) extends CommandInspectionServiceGrpc.CommandInspectionService + with StreamingServiceLifecycleManagement + with NamedLogging { + + protected implicit val contextualizedErrorLogger: ContextualizedErrorLogger = + ErrorLoggingContext( + logger, + loggerFactory.properties, + TraceContext.empty, + ) + + override def getCommandStatus( + request: GetCommandStatusRequest + ): Future[GetCommandStatusResponse] = { + implicit val loggingContextWithTrace: LoggingContextWithTrace = + LoggingContextWithTrace(loggerFactory, telemetry) + logger.info(s"Received new command status request $request.") + CommandInspectionServiceRequestValidator + .validateCommandStatusRequest(request)( + ErrorLoggingContext( + logger, + loggingContextWithTrace.toPropertiesMap, + loggingContextWithTrace.traceContext, + ) + ) + .fold( + t => + Future.failed[GetCommandStatusResponse]( + ValidationLogger.logFailureWithTrace(logger, request, t) + ), + _ => + service + .findCommandStatus(request.commandIdPrefix, request.state, request.limit) + .map(statuses => GetCommandStatusResponse(commandStatus = statuses.map(_.toProto))), + ) + } + +} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala index 85fa25cfd..3ec03fe04 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementService.scala @@ -11,8 +11,7 @@ import com.daml.logging.LoggingContext import com.daml.tracing.Telemetry import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.util.TimestampConversion -import com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.{ReadService, SubmissionResult} +import com.digitalasset.canton.ledger.participant.state.{SubmissionResult, WriteService} import com.digitalasset.canton.logging.LoggingContextUtil.createLoggingContext import com.digitalasset.canton.logging.LoggingContextWithTrace.implicitExtractTraceContext import com.digitalasset.canton.logging.TracedLoggerOps.TracedLoggerOps @@ -24,8 +23,7 @@ 
import scala.concurrent.{ExecutionContext, Future} import scala.util.Try private[apiserver] final class ApiPackageManagementService private ( - readService: ReadService, - packagesWrite: state.WritePackagesService, + writeService: WriteService, submissionIdGenerator: String => Ref.SubmissionId, telemetry: Telemetry, val loggerFactory: NamedLoggerFactory, @@ -52,7 +50,7 @@ private[apiserver] final class ApiPackageManagementService private ( LoggingContextWithTrace(loggerFactory, telemetry) logger.info("Listing known packages.") - readService + writeService .listLfPackages() .map { pkgs => ListKnownPackagesResponse(pkgs.map { pkgDescription => @@ -72,7 +70,7 @@ private[apiserver] final class ApiPackageManagementService private ( logging.submissionId(submissionIdGenerator(request.submissionId)) ) { implicit loggingContext: LoggingContextWithTrace => logger.info(s"Validating DAR file, ${loggingContext.serializeFiltered("submissionId")}.") - readService + writeService .validateDar(dar = request.darFile, darName = "defaultDarName") .flatMap { case SubmissionResult.Acknowledged => Future.successful(ValidateDarFileResponse()) @@ -88,7 +86,7 @@ private[apiserver] final class ApiPackageManagementService private ( ) { implicit loggingContext: LoggingContextWithTrace => logger.info(s"Uploading DAR file, ${loggingContext.serializeFiltered("submissionId")}.") - packagesWrite + writeService .uploadDar(request.darFile, submissionId) .flatMap { case SubmissionResult.Acknowledged => Future.successful(UploadDarFileResponse()) @@ -102,16 +100,14 @@ private[apiserver] final class ApiPackageManagementService private ( private[apiserver] object ApiPackageManagementService { def createApiService( - readService: ReadService, - writeBackend: state.WritePackagesService, + writeService: WriteService, telemetry: Telemetry, loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext ): PackageManagementServiceGrpc.PackageManagementService & GrpcApiService = new ApiPackageManagementService( - readService, - writeBackend, + writeService, augmentSubmissionId, telemetry, loggerFactory, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiParticipantPruningService.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiParticipantPruningService.scala index 36820ff33..1f4339bf1 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiParticipantPruningService.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiParticipantPruningService.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.ledger.api.grpc.GrpcApiService import com.digitalasset.canton.ledger.api.validation.ValidationErrors.* import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.ReadService +import com.digitalasset.canton.ledger.participant.state.WriteService import com.digitalasset.canton.ledger.participant.state.index.{ IndexParticipantPruningService, LedgerEndService, @@ -50,8 +50,7 @@ import scala.concurrent.{ExecutionContext, Future} final class ApiParticipantPruningService private ( readBackend: IndexParticipantPruningService with LedgerEndService, - writeBackend: state.WriteParticipantPruningService, - readService: ReadService, + 
writeService: WriteService, metrics: LedgerApiServerMetrics, telemetry: Telemetry, val loggerFactory: NamedLoggerFactory, @@ -106,7 +105,7 @@ final class ApiParticipantPruningService private ( )(MetricsContext(("phase", "underlyingLedger"))) _ = logger.debug("Getting incomplete reassignments") - incompletReassignmentOffsets <- readService.incompleteReassignmentOffsets( + incompletReassignmentOffsets <- writeService.incompleteReassignmentOffsets( validAt = pruneUpTo, stakeholders = Set.empty, // getting all incomplete reassignments ) @@ -153,7 +152,7 @@ final class ApiParticipantPruningService private ( logger.info( s"About to prune participant ledger up to ${pruneUpTo.toApiString} inclusively starting with the write service." ) - writeBackend + writeService .prune(pruneUpTo, submissionId, pruneAllDivulgedContracts) .toScalaUnwrapped .flatMap { @@ -241,8 +240,7 @@ final class ApiParticipantPruningService private ( object ApiParticipantPruningService { def createApiService( readBackend: IndexParticipantPruningService with LedgerEndService, - writeBackend: state.WriteParticipantPruningService, - readService: state.ReadService, + writeService: WriteService, metrics: LedgerApiServerMetrics, telemetry: Telemetry, loggerFactory: NamedLoggerFactory, @@ -251,8 +249,7 @@ object ApiParticipantPruningService { ): ParticipantPruningServiceGrpc.ParticipantPruningService with GrpcApiService = new ApiParticipantPruningService( readBackend, - writeBackend, - readService, + writeService, metrics, telemetry, loggerFactory, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandInspectionServiceImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandInspectionServiceImpl.scala new file mode 100644 index 000000000..6ededfb28 --- /dev/null +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandInspectionServiceImpl.scala @@ -0,0 +1,58 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.platform.apiserver.services.command
+
+import com.daml.ledger.api.v2.admin.command_inspection_service.{
+  CommandInspectionServiceGrpc,
+  CommandState,
+}
+import com.daml.tracing.Telemetry
+import com.digitalasset.canton.ledger.api.grpc.GrpcApiService
+import com.digitalasset.canton.ledger.api.services.CommandInspectionService
+import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
+import com.digitalasset.canton.platform.apiserver.execution.{CommandProgressTracker, CommandStatus}
+import com.digitalasset.canton.platform.apiserver.services.admin.ApiCommandInspectionService
+import io.grpc.ServerServiceDefinition
+
+import scala.concurrent.{ExecutionContext, Future}
+
+private[apiserver] final class CommandInspectionServiceImpl private (
+    tracker: CommandProgressTracker,
+    val loggerFactory: NamedLoggerFactory,
+) extends CommandInspectionService
+    with NamedLogging {
+
+  override def findCommandStatus(
+      commandIdPrefix: String,
+      state: CommandState,
+      limit: Int,
+  ): Future[Seq[CommandStatus]] =
+    tracker.findCommandStatus(commandIdPrefix, state, limit)
+}
+
+private[apiserver] object CommandInspectionServiceImpl {
+
+  def createApiService(
+      tracker: CommandProgressTracker,
+      telemetry: Telemetry,
+      loggerFactory: NamedLoggerFactory,
+  )(implicit
+      executionContext: ExecutionContext
+  ): ApiCommandInspectionService & GrpcApiService = {
+    val impl: CommandInspectionService =
+      new CommandInspectionServiceImpl(
+        tracker,
+        loggerFactory,
+      )
+
+    new ApiCommandInspectionService(
+      impl,
+      telemetry,
+      loggerFactory,
+    ) with GrpcApiService {
+      override def bindService(): ServerServiceDefinition =
+        CommandInspectionServiceGrpc.bindService(this, executionContext)
+    }
+  }
+}
diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala
index 9415b533b..24d9124ec 100644
--- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala
+++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala
@@ -18,6 +18,7 @@ import com.digitalasset.canton.data.Offset
 import com.digitalasset.canton.ledger.participant.state.{CompletionInfo, Reassignment, Update}
 import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger}
 import com.digitalasset.canton.metrics.LedgerApiServerMetrics
+import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker
 import com.digitalasset.canton.platform.apiserver.services.tracking.SubmissionTracker
 import com.digitalasset.canton.platform.index.InMemoryStateUpdater.{PrepareResult, UpdaterFlow}
 import com.digitalasset.canton.platform.indexer.TransactionTraversalUtils
@@ -147,6 +148,8 @@
     ) // must be after LedgerEnd update because this could trigger API actions relating to this LedgerEnd
     trackSubmissions(inMemoryState.submissionTracker, result.updates)
+    // can be done at any point in the pipeline; it is for debugging only
+    trackCommandProgress(inMemoryState.commandProgressTracker, result.updates)
   }
 
   private def trackSubmissions(
@@ -169,11 +172,17 @@
               )
             ) =>
           completionDetails.completionStreamResponse -> completionDetails.submitters
-        case Traced(rejected:
TransactionLogUpdate.TransactionRejected) => - rejected.completionDetails.completionStreamResponse -> rejected.completionDetails.submitters + case Traced(TransactionLogUpdate.TransactionRejected(_, completionDetails)) => + completionDetails.completionStreamResponse -> completionDetails.submitters } .foreach(submissionTracker.onCompletion) + private def trackCommandProgress( + commandProgressTracker: CommandProgressTracker, + updates: Vector[Traced[TransactionLogUpdate]], + ): Unit = + updates.view.foreach(commandProgressTracker.processLedgerUpdate) + private def updateCaches( inMemoryState: InMemoryState, updates: Vector[Traced[TransactionLogUpdate]], @@ -355,7 +364,7 @@ private[platform] object InMemoryStateUpdater { offset = offset, events = events.toVector, completionDetails = completionDetails, - domainId = Some(txAccepted.domainId.toProtoPrimitive), + domainId = txAccepted.domainId.toProtoPrimitive, recordTime = txAccepted.recordTime, ) } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerServiceOwner.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerServiceOwner.scala index e8341272c..348a7dbbd 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerServiceOwner.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/IndexerServiceOwner.scala @@ -39,6 +39,7 @@ final class IndexerServiceOwner( dataSourceProperties: DataSourceProperties, highAvailability: HaConfig, indexServiceDbDispatcher: Option[DbDispatcher], + excludedPackageIds: Set[Ref.PackageId], )(implicit materializer: Materializer, traceContext: TraceContext) extends ResourceOwner[ReportsHealth] with NamedLogging { @@ -48,11 +49,12 @@ final class IndexerServiceOwner( new FlywayMigrations( participantDataSourceConfig.jdbcUrl, loggerFactory, - ) + )(executionContext, traceContext) val indexerFactory = new JdbcIndexer.Factory( participantId, participantDataSourceConfig, config, + excludedPackageIds, readService, metrics, inMemoryState, @@ -104,7 +106,7 @@ object IndexerServiceOwner { def migrateOnly( jdbcUrl: String, loggerFactory: NamedLoggerFactory, - )(implicit rc: ResourceContext, traceContext: TraceContext): Future[Unit] = { + )(implicit ec: ExecutionContext, traceContext: TraceContext): Future[Unit] = { val flywayMigrations = new FlywayMigrations(jdbcUrl, loggerFactory) flywayMigrations.migrate() diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala index 5f74946e8..279fcb9c2 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/JdbcIndexer.scala @@ -41,6 +41,7 @@ object JdbcIndexer { participantId: Ref.ParticipantId, participantDataSourceConfig: ParticipantDataSourceConfig, config: IndexerConfig, + excludedPackageIds: Set[Ref.PackageId], readService: state.ReadService, metrics: LedgerApiServerMetrics, inMemoryState: InMemoryState, @@ -110,6 +111,7 @@ object JdbcIndexer { submissionBatchSize = config.submissionBatchSize, maxTailerBatchSize = config.maxTailerBatchSize, maxOutputBatchedBufferSize = config.maxOutputBatchedBufferSize, + excludedPackageIds = excludedPackageIds, metrics = 
metrics, inMemoryStateUpdaterFlow = apiUpdaterFlow, stringInterningView = inMemoryState.stringInterningView, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala index d653a060d..723c2df79 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipe.scala @@ -3,9 +3,9 @@ package com.digitalasset.canton.platform.indexer.parallel -import com.digitalasset.canton.util.PekkoUtil.syntax.* +import com.digitalasset.canton.util.BatchN import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.Source +import org.apache.pekko.stream.scaladsl.Flow import scala.concurrent.Future @@ -23,11 +23,11 @@ object BatchingParallelIngestionPipe { ingester: DB_BATCH => Future[DB_BATCH], maxTailerBatchSize: Int, ingestTail: Vector[DB_BATCH] => Future[Vector[DB_BATCH]], - )(source: Source[IN, NotUsed]): Source[DB_BATCH, NotUsed] = + ): Flow[IN, DB_BATCH, NotUsed] = { // Stage 1: the stream coming from ReadService, involves deserialization and translation to Update-s - source + Flow[IN] // Stage 2: Batching plus mapping to Database DTOs encapsulates all the CPU intensive computation of the ingestion. Executed in parallel. - .batchN(submissionBatchSize.toInt, inputMappingParallelism) + .via(BatchN(submissionBatchSize.toInt, inputMappingParallelism)) .mapAsync(inputMappingParallelism)(inputMapper) // Stage 3: Encapsulates sequential/stateful computation (generation of sequential IDs for events) .scan(seqMapperZero)(seqMapper) @@ -43,4 +43,5 @@ object BatchingParallelIngestionPipe { // Stage 7: Updating ledger-end and related data in database (this stage completion demarcates the consistent point-in-time) .mapAsync(1)(ingestTail) .mapConcat(identity) + } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala index fd8713d5a..24e7f746b 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscription.scala @@ -50,6 +50,7 @@ private[platform] final case class ParallelIndexerSubscription[DB_BATCH]( submissionBatchSize: Long, maxOutputBatchedBufferSize: Int, maxTailerBatchSize: Int, + excludedPackageIds: Set[Ref.PackageId], metrics: LedgerApiServerMetrics, inMemoryStateUpdaterFlow: InMemoryStateUpdater.UpdaterFlow, stringInterningView: StringInterning & InternizingStringInterningView, @@ -73,52 +74,56 @@ private[platform] final case class ParallelIndexerSubscription[DB_BATCH]( )(implicit traceContext: TraceContext): InitializeParallelIngestion.Initialized => Handle = { initialized => import MetricsContext.Implicits.empty - val (killSwitch, completionFuture) = BatchingParallelIngestionPipe( - submissionBatchSize = submissionBatchSize, - inputMappingParallelism = inputMappingParallelism, - inputMapper = inputMapperExecutor.execute( - 
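+            // The pipe is now a Flow: the read-service source above is buffered and
+            // attached with .via(...) instead of being handed to the pipe as a Source.
+            // inputMapper (below) turns each batch of Updates into index DB DTOs
+            // (UpdateToDbDto) and metering rows (UpdateToMeteringDbDto, which now
+            // skips the configured excludedPackageIds when computing statistics).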
inputMapper( - metrics, - mapInSpan( - UpdateToDbDto( - participantId = participantId, - translation = translation, - compressionStrategy = compressionStrategy, - metrics = metrics, + val (killSwitch, completionFuture) = initialized.readServiceSource + .buffered(metrics.parallelIndexer.inputBufferLength, maxInputBufferSize) + .via( + BatchingParallelIngestionPipe( + submissionBatchSize = submissionBatchSize, + inputMappingParallelism = inputMappingParallelism, + inputMapper = inputMapperExecutor.execute( + inputMapper( + metrics, + mapInSpan( + UpdateToDbDto( + participantId = participantId, + translation = translation, + compressionStrategy = compressionStrategy, + metrics = metrics, + ) + ), + UpdateToMeteringDbDto( + metrics = metrics.indexerEvents, + excludedPackageIds = excludedPackageIds, + ), + logger, ) ), - UpdateToMeteringDbDto(metrics = metrics.indexerEvents), - logger, + seqMapperZero = + seqMapperZero(initialized.initialEventSeqId, initialized.initialStringInterningId), + seqMapper = seqMapper( + dtos => stringInterningView.internize(DbDtoToStringsForInterning(dtos)), + metrics, + ), + batchingParallelism = batchingParallelism, + batcher = batcherExecutor.execute( + batcher(ingestionStorageBackend.batch(_, stringInterningView)) + ), + ingestingParallelism = ingestionParallelism, + ingester = ingester( + ingestFunction = ingestionStorageBackend.insertBatch, + zeroDbBatch = ingestionStorageBackend.batch(Vector.empty, stringInterningView), + dbDispatcher = dbDispatcher, + metrics = metrics, + ), + maxTailerBatchSize = maxTailerBatchSize, + ingestTail = ingestTail[DB_BATCH]( + parameterStorageBackend.updateLedgerEnd, + dbDispatcher, + metrics, + logger, + ), ) - ), - seqMapperZero = - seqMapperZero(initialized.initialEventSeqId, initialized.initialStringInterningId), - seqMapper = seqMapper( - dtos => stringInterningView.internize(DbDtoToStringsForInterning(dtos)), - metrics, - ), - batchingParallelism = batchingParallelism, - batcher = batcherExecutor.execute( - batcher(ingestionStorageBackend.batch(_, stringInterningView)) - ), - ingestingParallelism = ingestionParallelism, - ingester = ingester( - ingestFunction = ingestionStorageBackend.insertBatch, - zeroDbBatch = ingestionStorageBackend.batch(Vector.empty, stringInterningView), - dbDispatcher = dbDispatcher, - metrics = metrics, - ), - maxTailerBatchSize = maxTailerBatchSize, - ingestTail = ingestTail[DB_BATCH]( - parameterStorageBackend.updateLedgerEnd, - dbDispatcher, - metrics, - logger, - ), - )( - initialized.readServiceSource - .buffered(metrics.parallelIndexer.inputBufferLength, maxInputBufferSize) - ) + ) .map(batch => batch.offsetsUpdates -> batch.lastSeqEventId) .buffered( counter = metrics.parallelIndexer.outputBatchedBufferLength, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala index 967d294e6..ee98b9535 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/package.scala @@ -3,10 +3,11 @@ package com.digitalasset.canton -import com.daml.ledger.resources.ResourceOwner +import com.daml.ledger.resources.{ResourceContext, ResourceOwner} import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.tracing.TraceContext -import scala.concurrent.Future +import scala.concurrent.{ExecutionContext, Future} /** Type 
aliases used throughout the package */ package object platform { @@ -77,4 +78,17 @@ package object platform { t <- resourceOwner } yield t } + + implicit class ResourceOwnerFlagCloseableOps[T <: ResourceCloseable]( + val resourceOwner: ResourceOwner[T] + ) extends AnyVal { + def acquireFlagCloseable( + name: String + )(implicit executionContext: ExecutionContext, traceContext: TraceContext): Future[T] = { + val resource = resourceOwner.acquire()(ResourceContext(executionContext)) + resource.asFuture.map( + _.registerResource(resource, name) + ) + } + } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala index 306ec0805..3b46ec8f4 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/CompletionFromTransaction.scala @@ -17,8 +17,8 @@ import com.google.rpc.status.Status as StatusProto import io.grpc.Status // Turn a stream of transactions into a stream of completions for a given application and set of parties -private[platform] object CompletionFromTransaction { - private val OkStatus = StatusProto.of(Status.Code.OK.value(), "", Seq.empty) +object CompletionFromTransaction { + val OkStatus = StatusProto.of(Status.Code.OK.value(), "", Seq.empty) private val RejectionTransactionId = "" def acceptedCompletion( @@ -89,7 +89,7 @@ private[platform] object CompletionFromTransaction { offset = Some(ParticipantOffset.of(ParticipantOffset.Value.Absolute(offset.toApiString))), ) - private[store] def toApiCompletion( + def toApiCompletion( commandId: String, transactionId: String, applicationId: String, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/FlywayMigrations.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/FlywayMigrations.scala index b2c5179e0..6ee5726ee 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/FlywayMigrations.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/FlywayMigrations.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.platform.store -import com.daml.ledger.resources.ResourceContext import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.platform.store.FlywayMigrations.* import com.digitalasset.canton.platform.store.backend.VerifiedDataSource @@ -18,10 +17,9 @@ import scala.concurrent.{ExecutionContext, Future} class FlywayMigrations( jdbcUrl: String, val loggerFactory: NamedLoggerFactory, -)(implicit resourceContext: ResourceContext, traceContext: TraceContext) +)(implicit ec: ExecutionContext, traceContext: TraceContext) extends NamedLogging { private val dbType = DbType.jdbcType(jdbcUrl) - implicit private val ec: ExecutionContext = resourceContext.executionContext private def runF[T](t: FluentConfiguration => Future[T]): Future[T] = VerifiedDataSource(jdbcUrl, loggerFactory).flatMap(dataSource => diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDto.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDto.scala index 
c4074a654..bdc26d66b 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDto.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDto.scala @@ -3,7 +3,9 @@ package com.digitalasset.canton.platform.store.backend +import com.daml.lf.data.Ref import com.daml.lf.data.Time.Timestamp +import com.daml.lf.transaction.TransactionNodeStatistics import com.daml.metrics.api.MetricsContext import com.daml.metrics.api.MetricsContext.withExtraMetricLabels import com.digitalasset.canton.data.Offset @@ -16,6 +18,7 @@ object UpdateToMeteringDbDto { def apply( clock: () => Long = () => Timestamp.now().micros, + excludedPackageIds: Set[Ref.PackageId], metrics: IndexedUpdatesMetrics, )(implicit mc: MetricsContext @@ -29,12 +32,12 @@ object UpdateToMeteringDbDto { val ledgerOffset = input.last._1.toHexString (for { - optCompletionInfo <- input.collect { case (_, Traced(ta: TransactionAccepted)) => - ta.completionInfoO - } - ci <- optCompletionInfo.iterator - statistics <- ci.statistics - } yield (ci.applicationId, statistics.committed.actions + statistics.rolledBack.actions)) + (completionInfo, transactionAccepted) <- input.iterator + .collect { case (_, Traced(ta: TransactionAccepted)) => ta } + .flatMap(ta => ta.completionInfoO.iterator.map(_ -> ta)) + applicationId = completionInfo.applicationId + statistics = TransactionNodeStatistics(transactionAccepted.transaction, excludedPackageIds) + } yield (applicationId, statistics.committed.actions + statistics.rolledBack.actions)).toList .groupMapReduce(_._1)(_._2)(_ + _) .toList .filter(_._2 != 0) diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CommonStorageBackendFactory.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CommonStorageBackendFactory.scala index 59c6a791e..050f68e3f 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CommonStorageBackendFactory.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CommonStorageBackendFactory.scala @@ -16,9 +16,6 @@ import com.digitalasset.canton.platform.store.cache.LedgerEndCache trait CommonStorageBackendFactory extends StorageBackendFactory { - override val createParameterStorageBackend: ParameterStorageBackend = - ParameterStorageBackendImpl - override val createMeteringParameterStorageBackend: MeteringParameterStorageBackend = MeteringParameterStorageBackendImpl diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala index fd740f05b..1888c3343 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/CompletionStorageBackendTemplate.scala @@ -43,7 +43,7 @@ class CompletionStorageBackendTemplate( )(connection: Connection): Vector[CompletionStreamResponse] = { import ComposableQuery.* import com.digitalasset.canton.platform.store.backend.Conversions.applicationIdToStatement - import 
com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* + import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* val internedParties = parties.view.map(stringInterning.party.tryInternalize).flatMap(_.toList).toSet if (internedParties.isEmpty) { diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala index 8c2f84889..3b7e1459a 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventReaderQueries.scala @@ -8,7 +8,7 @@ import com.daml.lf.data.Ref.Party import com.daml.lf.value.Value.ContractId import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.dao.events.Raw import com.digitalasset.canton.platform.store.dao.events.Raw.FlatEvent import com.digitalasset.canton.platform.store.interning.StringInterning diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala index b1976fc6f..f65482fc0 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/EventStorageBackendTemplate.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ CompositeSql, SqlStringInterpolation, } -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.dao.events.Raw import com.digitalasset.canton.platform.store.interning.StringInterning diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/MeteringStorageBackendImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/MeteringStorageBackendImpl.scala index 572a2dcd7..86c5ba6e3 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/MeteringStorageBackendImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/MeteringStorageBackendImpl.scala @@ -26,7 +26,7 @@ import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ } import com.digitalasset.canton.platform.store.backend.common.MeteringParameterStorageBackendImpl.assertLedgerMeteringEnd import com.digitalasset.canton.platform.store.backend.common.MeteringStorageBackendImpl.* -import 
com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.backend.{ Conversions, MeteringStorageReadBackend, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ParameterStorageBackendImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ParameterStorageBackendImpl.scala index 6be62ab60..10ec6cd2f 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ParameterStorageBackendImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/ParameterStorageBackendImpl.scala @@ -17,12 +17,14 @@ import scalaz.syntax.tag.* import java.sql.Connection -private[backend] object ParameterStorageBackendImpl extends ParameterStorageBackend { +private[backend] class ParameterStorageBackendImpl(queryStrategy: QueryStrategy) + extends ParameterStorageBackend { override def updateLedgerEnd( ledgerEnd: ParameterStorageBackend.LedgerEnd )(connection: Connection): Unit = { import Conversions.OffsetToStatement + queryStrategy.forceSynchronousCommitForCurrentTransactionForPostgreSQL(connection) discard( SQL""" UPDATE diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/PartyStorageBackendTemplate.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/PartyStorageBackendTemplate.scala index b3a93f0ac..49083232a 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/PartyStorageBackendTemplate.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/PartyStorageBackendTemplate.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.platform.store.backend.Conversions.{ } import com.digitalasset.canton.platform.store.backend.PartyStorageBackend import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.dao.JdbcLedgerDao.{acceptType, rejectType} import com.digitalasset.canton.platform.store.entries.PartyLedgerEntry diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala index 5cb598014..504902db2 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/QueryStrategy.scala @@ -9,6 +9,8 @@ import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ SqlStringInterpolation, } +import java.sql.Connection + object QueryStrategy { /** This populates the following part of the query: @@ -135,4 +137,6 @@ trait QueryStrategy { } def analyzeTable(tableName: String): CompositeSql + + def 
forceSynchronousCommitForCurrentTransactionForPostgreSQL(connection: Connection): Unit = () } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlAsVectorOf.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala similarity index 85% rename from community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlAsVectorOf.scala rename to community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala index 33cf8c286..8cd9a2b81 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlAsVectorOf.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/SimpleSqlExtensions.scala @@ -8,7 +8,7 @@ import anorm.{Cursor, Row, RowParser, SimpleSql} import java.sql.Connection import scala.util.{Failure, Success, Try} -private[backend] object SimpleSqlAsVectorOf { +private[backend] object SimpleSqlExtensions { implicit final class `SimpleSql ops`(val sql: SimpleSql[Row]) extends AnyVal { @@ -46,6 +46,11 @@ private[backend] object SimpleSqlAsVectorOf { ) } + def asSingle[A](parser: RowParser[A])(implicit connection: Connection): A = + sql.as(parser.single) + + def asSingleOpt[A](parser: RowParser[A])(implicit connection: Connection): Option[A] = + sql.as(parser.singleOpt) } } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/StringInterningStorageBackendImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/StringInterningStorageBackendImpl.scala index 8eb6e103f..5ab98a432 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/StringInterningStorageBackendImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/StringInterningStorageBackendImpl.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.platform.store.backend.common import anorm.SqlParser.{int, str} import anorm.{RowParser, SqlStringInterpolation, ~} import com.digitalasset.canton.platform.store.backend.StringInterningStorageBackend -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import java.sql.Connection diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionPointwiseQueries.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionPointwiseQueries.scala index c83cdfcc5..2134ba2ca 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionPointwiseQueries.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionPointwiseQueries.scala @@ -9,7 +9,7 @@ import com.digitalasset.canton.data.Offset import com.digitalasset.canton.platform.Party import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation -import 
com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.dao.events.Raw import com.digitalasset.canton.platform.store.interning.StringInterning diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionStreamingQueries.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionStreamingQueries.scala index d6d31dbc3..e7eecb6be 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionStreamingQueries.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/common/TransactionStreamingQueries.scala @@ -7,7 +7,7 @@ import anorm.SqlParser.long import com.daml.lf.data.Ref import com.digitalasset.canton.platform.store.backend.EventStorageBackend import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.dao.events.Raw import com.digitalasset.canton.platform.store.interning.StringInterning import com.digitalasset.canton.platform.{Identifier, Party} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2EventStorageBackend.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2EventStorageBackend.scala index 203c324e8..ffe4e0d51 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2EventStorageBackend.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2EventStorageBackend.scala @@ -4,22 +4,21 @@ package com.digitalasset.canton.platform.store.backend.h2 import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.platform.store.backend.common.{ - EventStorageBackendTemplate, - ParameterStorageBackendImpl, -} +import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend +import com.digitalasset.canton.platform.store.backend.common.EventStorageBackendTemplate import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.interning.StringInterning class H2EventStorageBackend( ledgerEndCache: LedgerEndCache, stringInterning: StringInterning, + parameterStorageBackend: ParameterStorageBackend, loggerFactory: NamedLoggerFactory, ) extends EventStorageBackendTemplate( queryStrategy = H2QueryStrategy, ledgerEndCache = ledgerEndCache, stringInterning = stringInterning, participantAllDivulgedContractsPrunedUpToInclusive = - ParameterStorageBackendImpl.participantAllDivulgedContractsPrunedUpToInclusive, + parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive, loggerFactory = loggerFactory, ) {} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala 
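Besides the rename from SimpleSqlAsVectorOf, SimpleSqlExtensions added above gains two single-row helpers, asSingle and asSingleOpt, thin wrappers around anorm's parser.single and parser.singleOpt. A minimal usage sketch; the query, table, and column names are invented for illustration:

  import anorm.SqlParser.long
  import anorm.SqlStringInterpolation
  import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.*

  import java.sql.Connection

  object SingleRowQueries {
    // asSingleOpt returns None on an empty result instead of throwing,
    // mirroring anorm's parser.singleOpt.
    def ledgerEndSequentialId(implicit connection: Connection): Option[Long] =
      SQL"select ledger_end_sequential_id from parameters" // hypothetical table
        .asSingleOpt(long("ledger_end_sequential_id"))
  }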
index 18c0056a3..f5cf2edbc 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/h2/H2StorageBackendFactory.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.platform.store.backend.common.{ CompletionStorageBackendTemplate, ContractStorageBackendTemplate, IngestionStorageBackendTemplate, + ParameterStorageBackendImpl, PartyStorageBackendTemplate, } import com.digitalasset.canton.platform.store.backend.localstore.{ @@ -22,6 +23,7 @@ import com.digitalasset.canton.platform.store.backend.{ DataSourceStorageBackend, EventStorageBackend, IngestionStorageBackend, + ParameterStorageBackend, PartyStorageBackend, ResetStorageBackend, StorageBackendFactory, @@ -34,6 +36,9 @@ object H2StorageBackendFactory extends StorageBackendFactory with CommonStorageB override val createIngestionStorageBackend: IngestionStorageBackend[_] = new IngestionStorageBackendTemplate(H2QueryStrategy, H2Schema.schema) + override val createParameterStorageBackend: ParameterStorageBackend = + new ParameterStorageBackendImpl(H2QueryStrategy) + override def createPartyStorageBackend(ledgerEndCache: LedgerEndCache): PartyStorageBackend = new PartyStorageBackendTemplate(H2QueryStrategy, ledgerEndCache) @@ -60,6 +65,7 @@ object H2StorageBackendFactory extends StorageBackendFactory with CommonStorageB new H2EventStorageBackend( ledgerEndCache = ledgerEndCache, stringInterning = stringInterning, + parameterStorageBackend = createParameterStorageBackend, loggerFactory = loggerFactory, ) @@ -71,4 +77,5 @@ object H2StorageBackendFactory extends StorageBackendFactory with CommonStorageB override val createResetStorageBackend: ResetStorageBackend = H2ResetStorageBackend + } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/IdentityProviderStorageBackendImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/IdentityProviderStorageBackendImpl.scala index 8f88e922f..da40de125 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/IdentityProviderStorageBackendImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/IdentityProviderStorageBackendImpl.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.ledger.api.domain.{ JwksUrl, } import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import java.sql.Connection diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/ParticipantMetadataBackend.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/ParticipantMetadataBackend.scala index ee3bc5b60..d1279f7e9 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/ParticipantMetadataBackend.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/ParticipantMetadataBackend.scala @@ -5,7 +5,7 @@ package 
com.digitalasset.canton.platform.store.backend.localstore import anorm.SqlParser.{long, str} import anorm.{RowParser, SqlStringInterpolation, ~} -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import java.sql.Connection diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/PartyRecordStorageBackendImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/PartyRecordStorageBackendImpl.scala index 80a86e2ec..80c6819f3 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/PartyRecordStorageBackendImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/PartyRecordStorageBackendImpl.scala @@ -110,7 +110,7 @@ object PartyRecordStorageBackendImpl extends PartyRecordStorageBackend { parties: Set[Ref.Party], identityProviderId: Option[IdentityProviderId.Id], )(connection: Connection): Set[Ref.Party] = if (parties.nonEmpty) { - import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* + import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation val filteredParties = cSQL"party in (${parties.map(_.toString)})" @@ -132,7 +132,7 @@ object PartyRecordStorageBackendImpl extends PartyRecordStorageBackend { override def filterExistingParties( parties: Set[Ref.Party] )(connection: Connection): Set[Ref.Party] = if (parties.nonEmpty) { - import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* + import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation val filteredParties = cSQL"party in (${parties.map(_.toString)})" diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/UserManagementStorageBackendImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/UserManagementStorageBackendImpl.scala index 31173ffd5..70fb4659c 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/UserManagementStorageBackendImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/localstore/UserManagementStorageBackendImpl.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.ledger.api.domain.UserRight.{ ParticipantAdmin, } import com.digitalasset.canton.ledger.api.domain.{IdentityProviderId, UserRight} -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import com.digitalasset.canton.platform.store.backend.common.{ComposableQuery, QueryStrategy} import com.digitalasset.canton.platform.{LedgerString, Party, UserId} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleEventStorageBackend.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleEventStorageBackend.scala index 
92f2932ae..be977c69c 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleEventStorageBackend.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleEventStorageBackend.scala @@ -4,22 +4,21 @@ package com.digitalasset.canton.platform.store.backend.oracle import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.platform.store.backend.common.{ - EventStorageBackendTemplate, - ParameterStorageBackendImpl, -} +import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend +import com.digitalasset.canton.platform.store.backend.common.EventStorageBackendTemplate import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.interning.StringInterning class OracleEventStorageBackend( ledgerEndCache: LedgerEndCache, stringInterning: StringInterning, + parameterStorageBackend: ParameterStorageBackend, loggerFactory: NamedLoggerFactory, ) extends EventStorageBackendTemplate( queryStrategy = OracleQueryStrategy, ledgerEndCache = ledgerEndCache, stringInterning = stringInterning, participantAllDivulgedContractsPrunedUpToInclusive = - ParameterStorageBackendImpl.participantAllDivulgedContractsPrunedUpToInclusive, + parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive, loggerFactory = loggerFactory, ) {} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleStorageBackendFactory.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleStorageBackendFactory.scala index 293f672f1..3e276bced 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleStorageBackendFactory.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/oracle/OracleStorageBackendFactory.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.platform.store.backend.common.{ CompletionStorageBackendTemplate, ContractStorageBackendTemplate, IngestionStorageBackendTemplate, + ParameterStorageBackendImpl, PartyStorageBackendTemplate, } import com.digitalasset.canton.platform.store.backend.{ @@ -18,6 +19,7 @@ import com.digitalasset.canton.platform.store.backend.{ DataSourceStorageBackend, EventStorageBackend, IngestionStorageBackend, + ParameterStorageBackend, PartyStorageBackend, ResetStorageBackend, StorageBackendFactory, @@ -30,6 +32,9 @@ object OracleStorageBackendFactory extends StorageBackendFactory with CommonStor override val createIngestionStorageBackend: IngestionStorageBackend[_] = new IngestionStorageBackendTemplate(OracleQueryStrategy, OracleSchema.schema) + override val createParameterStorageBackend: ParameterStorageBackend = + new ParameterStorageBackendImpl(OracleQueryStrategy) + override def createPartyStorageBackend(ledgerEndCache: LedgerEndCache): PartyStorageBackend = new PartyStorageBackendTemplate(OracleQueryStrategy, ledgerEndCache) @@ -53,6 +58,7 @@ object OracleStorageBackendFactory extends StorageBackendFactory with CommonStor new OracleEventStorageBackend( ledgerEndCache = ledgerEndCache, stringInterning = stringInterning, + parameterStorageBackend = createParameterStorageBackend, loggerFactory = loggerFactory, ) diff --git 
a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala index 0f2a2ec45..f79747b26 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresEventStorageBackend.scala @@ -4,22 +4,21 @@ package com.digitalasset.canton.platform.store.backend.postgresql import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.platform.store.backend.common.{ - EventStorageBackendTemplate, - ParameterStorageBackendImpl, -} +import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend +import com.digitalasset.canton.platform.store.backend.common.EventStorageBackendTemplate import com.digitalasset.canton.platform.store.cache.LedgerEndCache import com.digitalasset.canton.platform.store.interning.StringInterning class PostgresEventStorageBackend( ledgerEndCache: LedgerEndCache, stringInterning: StringInterning, + parameterStorageBackend: ParameterStorageBackend, loggerFactory: NamedLoggerFactory, ) extends EventStorageBackendTemplate( queryStrategy = PostgresQueryStrategy, ledgerEndCache = ledgerEndCache, stringInterning = stringInterning, participantAllDivulgedContractsPrunedUpToInclusive = - ParameterStorageBackendImpl.participantAllDivulgedContractsPrunedUpToInclusive, + parameterStorageBackend.participantAllDivulgedContractsPrunedUpToInclusive, loggerFactory = loggerFactory, ) {} diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala index 008236dfd..d16625834 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresQueryStrategy.scala @@ -3,12 +3,15 @@ package com.digitalasset.canton.platform.store.backend.postgresql +import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.{ CompositeSql, SqlStringInterpolation, } import com.digitalasset.canton.platform.store.backend.common.QueryStrategy +import java.sql.Connection + object PostgresQueryStrategy extends QueryStrategy { override def arrayContains(arrayColumnName: String, elementColumnName: String): String = @@ -34,4 +37,8 @@ object PostgresQueryStrategy extends QueryStrategy { override def analyzeTable(tableName: String): CompositeSql = cSQL"ANALYZE #$tableName" + + override def forceSynchronousCommitForCurrentTransactionForPostgreSQL( + connection: Connection + ): Unit = SQL"SET LOCAL synchronous_commit TO ON".execute()(connection).discard } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala index 4eb750c55..37429ada4 100644 
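The PostgresQueryStrategy override above is what gives updateLedgerEnd its durability guarantee: ParameterStorageBackendImpl invokes the hook at the start of the ledger-end transaction, and SET LOCAL confines the setting to that transaction, so this one commit waits for the WAL flush even on servers running with synchronous_commit = off. A rough JDBC sketch of the mechanism, with a hypothetical table and value:

  import java.sql.Connection

  object DurableCommitSketch {
    // Must run inside an open transaction; SET LOCAL reverts automatically
    // at commit or rollback, leaving other transactions unaffected.
    def updateLedgerEndDurably(connection: Connection): Unit = {
      connection.setAutoCommit(false)
      val stmt = connection.createStatement()
      try {
        stmt.execute("SET LOCAL synchronous_commit TO ON")
        stmt.executeUpdate("UPDATE parameters SET ledger_end = 'deadbeef'") // hypothetical
        connection.commit() // returns only after the commit record is flushed to the WAL
      } finally stmt.close()
    }
  }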
--- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/postgresql/PostgresStorageBackendFactory.scala @@ -16,6 +16,9 @@ final case class PostgresStorageBackendFactory(loggerFactory: NamedLoggerFactory override val createIngestionStorageBackend: IngestionStorageBackend[_] = new IngestionStorageBackendTemplate(PostgresQueryStrategy, PGSchema.schema) + override val createParameterStorageBackend: ParameterStorageBackend = + new ParameterStorageBackendImpl(PostgresQueryStrategy) + override def createPartyStorageBackend(ledgerEndCache: LedgerEndCache): PartyStorageBackend = new PartyStorageBackendTemplate(PostgresQueryStrategy, ledgerEndCache) @@ -39,6 +42,7 @@ final case class PostgresStorageBackendFactory(loggerFactory: NamedLoggerFactory new PostgresEventStorageBackend( ledgerEndCache = ledgerEndCache, stringInterning = stringInterning, + parameterStorageBackend = createParameterStorageBackend, loggerFactory = loggerFactory, ) diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala index 126c39185..bef0d6b22 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/events/TransactionLogUpdatesConversions.scala @@ -183,7 +183,7 @@ private[events] object TransactionLogUpdatesConversions { effectiveAt = Some(TimestampConversion.fromLf(transactionAccepted.effectiveAt)), events = flatEvents, offset = ApiOffset.toApiString(transactionAccepted.offset), - domainId = transactionAccepted.domainId.getOrElse(""), + domainId = transactionAccepted.domainId, traceContext = SerializableTraceContext(traceContext).toDamlProtoOpt, recordTime = Some(TimestampConversion.fromLf(transactionAccepted.recordTime)), ) @@ -388,7 +388,7 @@ private[events] object TransactionLogUpdatesConversions { offset = ApiOffset.toApiString(transactionAccepted.offset), eventsById = eventsById, rootEventIds = rootEventIds, - domainId = transactionAccepted.domainId.getOrElse(""), + domainId = transactionAccepted.domainId, traceContext = SerializableTraceContext(traceContext).toDamlProtoOpt, recordTime = Some(TimestampConversion.fromLf(transactionAccepted.recordTime)), ) diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala index b65fef16d..6f895f344 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/interfaces/TransactionLogUpdate.scala @@ -44,7 +44,7 @@ object TransactionLogUpdate { offset: Offset, events: Vector[Event], completionDetails: Option[CompletionDetails], - domainId: Option[String], + domainId: String, recordTime: Timestamp, ) extends TransactionLogUpdate diff --git 
a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala index 3b0492b4e..ef8a108e5 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/InMemoryStateSpec.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.platform import com.daml.lf.data.Ref import com.digitalasset.canton.TestEssentials import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.apiserver.services.tracking.SubmissionTracker import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend import com.digitalasset.canton.platform.store.backend.ParameterStorageBackend.LedgerEnd @@ -144,6 +145,7 @@ class InMemoryStateSpec extends AsyncFlatSpec with MockitoSugar with Matchers wi val dispatcherState = mock[DispatcherState] val updateStringInterningView = mock[(UpdatingStringInterningView, LedgerEnd) => Future[Unit]] val submissionTracker = mock[SubmissionTracker] + val commandProgressTracker = CommandProgressTracker.NoOp // Mocks should be called in the asserted order val inOrderMockCalls = Mockito.inOrder( @@ -163,6 +165,7 @@ class InMemoryStateSpec extends AsyncFlatSpec with MockitoSugar with Matchers wi stringInterningView = stringInterningView, dispatcherState = dispatcherState, submissionTracker = submissionTracker, + commandProgressTracker = commandProgressTracker, loggerFactory = loggerFactory, ) diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala index c23e2a99c..50488ac88 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.ledger.participant.state.{ import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.IndexComponentTest.{TestReadService, TestServices} +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.config.{IndexServiceConfig, ServerRole} import com.digitalasset.canton.platform.index.IndexServiceOwner import com.digitalasset.canton.platform.indexer.ha.HaConfig @@ -96,6 +97,7 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest { val indexResourceOwner = for { (inMemoryState, updaterFlow) <- LedgerApiServer.createInMemoryStateAndUpdater( + commandProgressTracker = CommandProgressTracker.NoOp, indexServiceConfig = IndexServiceConfig(), maxCommandsInFlight = 1, // not used metrics = LedgerApiServerMetrics.ForTesting, @@ -133,6 +135,7 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest { ), highAvailability = HaConfig(), indexServiceDbDispatcher = Some(dbSupport.dbDispatcher), + excludedPackageIds = Set.empty, ) contractLoader <- ContractLoader.create( contractStorageBackend = dbSupport.storageBackendFactory.createContractStorageBackend( diff --git 
a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutorSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutorSpec.scala index aaeb478fa..576aa4cf9 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutorSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/execution/StoreBackedCommandExecutorSpec.scala @@ -26,7 +26,7 @@ import com.digitalasset.canton.data.DeduplicationPeriod import com.digitalasset.canton.ledger.api.domain import com.digitalasset.canton.ledger.api.domain.{CommandId, Commands} import com.digitalasset.canton.ledger.api.util.TimeProvider -import com.digitalasset.canton.ledger.participant.state.ReadService +import com.digitalasset.canton.ledger.participant.state.WriteService import com.digitalasset.canton.ledger.participant.state.index.{ContractState, ContractStore} import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.metrics.LedgerApiServerMetrics @@ -117,7 +117,7 @@ class StoreBackedCommandExecutorSpec new StoreBackedCommandExecutor( engine, Ref.ParticipantId.assertFromString("anId"), - mock[ReadService], + mock[WriteService], mock[ContractStore], AuthorityResolver(), authenticateContract = _ => Right(()), @@ -328,7 +328,7 @@ class StoreBackedCommandExecutorSpec val sut = new StoreBackedCommandExecutor( mockEngine, Ref.ParticipantId.assertFromString("anId"), - mock[ReadService], + mock[WriteService], store, AuthorityResolver(), authenticateContract = _ => authenticationResult, diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala index 50b189cb3..d5fd53fda 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/ApiCommandSubmissionServiceSpec.scala @@ -17,6 +17,7 @@ import com.digitalasset.canton.ledger.api.validation.{ } import com.digitalasset.canton.logging.LoggingContextWithTrace import com.digitalasset.canton.metrics.LedgerApiServerMetrics +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.tracing.TestTelemetrySetup import io.opentelemetry.sdk.OpenTelemetrySdk import org.mockito.captor.ArgCaptor @@ -138,6 +139,7 @@ class ApiCommandSubmissionServiceSpec currentUtcTime = () => Instant.EPOCH, maxDeduplicationDuration = Duration.ZERO, submissionIdGenerator = () => Ref.SubmissionId.assertFromString(generatedSubmissionId), + tracker = CommandProgressTracker.NoOp, metrics = LedgerApiServerMetrics.ForTesting, telemetry = new DefaultOpenTelemetry(OpenTelemetrySdk.builder().build()), loggerFactory = loggerFactory, diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala index 8320b56a2..d2207a692 100644 --- 
a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/admin/ApiPackageManagementServiceSpec.scala @@ -11,26 +11,30 @@ import com.daml.ledger.api.v2.admin.package_management_service.{ ValidateDarFileRequest, ValidateDarFileResponse, } -import com.daml.lf.data.Ref +import com.daml.lf.data.Ref.{ApplicationId, CommandId, Party, SubmissionId, WorkflowId} +import com.daml.lf.data.{ImmArray, Ref} +import com.daml.lf.transaction.{GlobalKey, SubmittedTransaction} +import com.daml.lf.value.Value import com.daml.tracing.DefaultOpenTelemetry import com.daml.tracing.TelemetrySpecBase.* import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.data.{Offset, ProcessedDisclosedContract} import com.digitalasset.canton.ledger.api.health.HealthStatus import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.ledger.participant.state.{ InternalStateService, - ReadService, + PruningResult, + ReassignmentCommand, SubmissionResult, - Update, + SubmitterInfo, + TransactionMeta, } import com.digitalasset.canton.logging.SuppressionRule -import com.digitalasset.canton.tracing.{TestTelemetrySetup, TraceContext, Traced} +import com.digitalasset.canton.topology.DomainId +import com.digitalasset.canton.tracing.{TestTelemetrySetup, TraceContext} import com.google.protobuf.ByteString import io.opentelemetry.api.trace.Tracer import io.opentelemetry.sdk.OpenTelemetrySdk -import org.apache.pekko.NotUsed -import org.apache.pekko.stream.scaladsl.Source import org.mockito.{ArgumentMatchersSugar, MockitoSugar} import org.scalatest.BeforeAndAfterEach import org.scalatest.concurrent.Eventually @@ -38,6 +42,7 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AsyncWordSpec import org.slf4j.event.Level.DEBUG +import java.util.concurrent.CompletionStage import scala.concurrent.Future // TODO(#17635) Very thin layer. 
Revisit utility of testing @@ -111,8 +116,7 @@ class ApiPackageManagementServiceSpec private def createApiService(): PackageManagementServiceGrpc.PackageManagementService = { ApiPackageManagementService.createApiService( - TestReadService(testTelemetrySetup.tracer), - TestWritePackagesService(testTelemetrySetup.tracer), + TestWriteService(testTelemetrySetup.tracer), telemetry = new DefaultOpenTelemetry(OpenTelemetrySdk.builder().build()), loggerFactory = loggerFactory, ) @@ -122,8 +126,7 @@ class ApiPackageManagementServiceSpec object ApiPackageManagementServiceSpec { private val aSubmissionId = "aSubmission" - private final case class TestWritePackagesService(tracer: Tracer) - extends state.WritePackagesService { + private final case class TestWriteService(tracer: Tracer) extends state.WriteService { override def uploadDar( dar: ByteString, submissionId: Ref.SubmissionId, @@ -137,9 +140,7 @@ object ApiPackageManagementServiceSpec { ) Future.successful(state.SubmissionResult.Acknowledged) } - } - private final case class TestReadService(tracer: Tracer) extends ReadService { override def validateDar(dar: ByteString, darName: String)(implicit traceContext: TraceContext ): Future[SubmissionResult] = { @@ -151,11 +152,6 @@ object ApiPackageManagementServiceSpec { Future.successful(state.SubmissionResult.Acknowledged) } - override def stateUpdates(beginAfter: Option[Offset])(implicit - traceContext: TraceContext - ): Source[(Offset, Traced[Update]), NotUsed] = - throw new UnsupportedOperationException() - override def internalStateService: Option[InternalStateService] = throw new UnsupportedOperationException() @@ -167,5 +163,40 @@ object ApiPackageManagementServiceSpec { override def currentHealth(): HealthStatus = throw new UnsupportedOperationException() + + override def submitTransaction( + submitterInfo: SubmitterInfo, + optDomainId: Option[DomainId], + transactionMeta: TransactionMeta, + transaction: SubmittedTransaction, + estimatedInterpretationCost: Long, + globalKeyMapping: Map[GlobalKey, Option[Value.ContractId]], + processedDisclosedContracts: ImmArray[ProcessedDisclosedContract], + )(implicit traceContext: TraceContext): CompletionStage[SubmissionResult] = + throw new UnsupportedOperationException() + + override def submitReassignment( + submitter: Party, + applicationId: ApplicationId, + commandId: CommandId, + submissionId: Option[SubmissionId], + workflowId: Option[WorkflowId], + reassignmentCommand: ReassignmentCommand, + )(implicit traceContext: TraceContext): CompletionStage[SubmissionResult] = + throw new UnsupportedOperationException() + + override def allocateParty( + hint: Option[Party], + displayName: Option[String], + submissionId: SubmissionId, + )(implicit traceContext: TraceContext): CompletionStage[SubmissionResult] = + throw new UnsupportedOperationException() + + override def prune( + pruneUpToInclusive: Offset, + submissionId: SubmissionId, + pruneAllDivulgedContracts: Boolean, + ): CompletionStage[PruningResult] = + throw new UnsupportedOperationException() } } diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala index 649ced42c..8b858cb0c 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala +++ 
b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala @@ -28,6 +28,7 @@ import com.digitalasset.canton.ledger.participant.state.{ } import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.pekkostreams.dispatcher.Dispatcher +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.apiserver.services.tracking.SubmissionTracker import com.digitalasset.canton.platform.index.InMemoryStateUpdater.PrepareResult import com.digitalasset.canton.platform.index.InMemoryStateUpdaterSpec.* @@ -208,7 +209,7 @@ object InMemoryStateUpdaterSpec { offset = offset(1L), events = Vector(), completionDetails = None, - domainId = Some(domainId1.toProtoPrimitive), + domainId = domainId1.toProtoPrimitive, recordTime = Timestamp.Epoch, ) )(emptyTraceContext) @@ -295,6 +296,7 @@ object InMemoryStateUpdaterSpec { val dispatcherState: DispatcherState = mock[DispatcherState] val submissionTracker: SubmissionTracker = mock[SubmissionTracker] val dispatcher: Dispatcher[Offset] = mock[Dispatcher[Offset]] + val commandProgressTracker = CommandProgressTracker.NoOp val inOrder: InOrder = inOrder( ledgerEndCache, @@ -315,6 +317,7 @@ object InMemoryStateUpdaterSpec { stringInterningView = stringInterningView, dispatcherState = dispatcherState, submissionTracker = submissionTracker, + commandProgressTracker = commandProgressTracker, loggerFactory = loggerFactory, )(executorService) @@ -367,7 +370,7 @@ object InMemoryStateUpdaterSpec { offset = tx_accepted_withCompletionDetails_offset, events = (1 to 3).map(_ => mock[TransactionLogUpdate.Event]).toVector, completionDetails = Some(tx_accepted_completionDetails), - domainId = None, + domainId = domainId1.toProtoPrimitive, recordTime = Timestamp(1), ) )(emptyTraceContext) @@ -509,7 +512,6 @@ object InMemoryStateUpdaterSpec { commandId = Ref.CommandId.assertFromString("cmdId"), optDeduplicationPeriod = None, submissionId = None, - statistics = None, ), reasonTemplate = FinalReason(new Status()), domainId = DomainId.tryFromString("da::default"), diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala index d00c5279e..55bc99c34 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala @@ -24,6 +24,7 @@ import com.digitalasset.canton.logging.{ } import com.digitalasset.canton.metrics.LedgerApiServerMetrics import com.digitalasset.canton.platform.LedgerApiServer +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.config.{ CommandServiceConfig, IndexServiceConfig, @@ -254,6 +255,7 @@ class RecoveringIndexerIntegrationSpec (inMemoryState, inMemoryStateUpdaterFlow) <- LedgerApiServer .createInMemoryStateAndUpdater( + commandProgressTracker = CommandProgressTracker.NoOp, IndexServiceConfig(), CommandServiceConfig.DefaultMaxCommandsInFlight, metrics, @@ -291,6 +293,7 @@ class RecoveringIndexerIntegrationSpec ), highAvailability = HaConfig(), indexServiceDbDispatcher = Some(dbSupport.dbDispatcher), + excludedPackageIds = Set.empty, 
)(materializer, traceContext) } yield (participantState._2, dbSupport) } diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala index 9481f6b73..f622aff36 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala @@ -7,8 +7,8 @@ import cats.syntax.bifunctor.toBifunctorOps import com.daml.lf.crypto import com.daml.lf.data.Ref import com.daml.lf.data.Time.Timestamp +import com.daml.lf.transaction.CommittedTransaction import com.daml.lf.transaction.test.{TestNodeBuilder, TreeTransactionBuilder} -import com.daml.lf.transaction.{CommittedTransaction, TransactionNodeStatistics} import com.daml.lf.value.Value import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.api.health.HealthStatus @@ -153,7 +153,6 @@ object EndlessReadService { val workflowId: Ref.WorkflowId = Ref.WorkflowId.assertFromString("Workflow") val templateId: Ref.Identifier = Ref.Identifier.assertFromString("pkg:Mod:Template") val choiceName: Ref.Name = Ref.Name.assertFromString("SomeChoice") - val statistics: TransactionNodeStatistics = TransactionNodeStatistics.Empty // Note: all methods in this object MUST be fully deterministic def index(o: Offset): Int = Integer.parseInt(o.toHexString, 16) @@ -170,7 +169,6 @@ object EndlessReadService { commandId = commandId(i), optDeduplicationPeriod = None, submissionId = None, - statistics = Some(statistics), ) def transactionMeta(i: Int): TransactionMeta = TransactionMeta( ledgerEffectiveTime = recordTime(i), diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilityTestFixture.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilityTestFixture.scala index 7d8dacad5..dbdac171f 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilityTestFixture.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/IndexerStabilityTestFixture.scala @@ -10,6 +10,7 @@ import com.digitalasset.canton.ledger.api.health.ReportsHealth import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} import com.digitalasset.canton.metrics.{LedgerApiServerHistograms, LedgerApiServerMetrics} import com.digitalasset.canton.platform.LedgerApiServer +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.config.{CommandServiceConfig, IndexServiceConfig} import com.digitalasset.canton.platform.indexer.{ IndexerConfig, @@ -93,6 +94,7 @@ final class IndexerStabilityTestFixture(loggerFactory: NamedLoggerFactory) { (inMemoryState, inMemoryStateUpdaterFlow) <- LedgerApiServer .createInMemoryStateAndUpdater( + commandProgressTracker = CommandProgressTracker.NoOp, IndexServiceConfig(), CommandServiceConfig.DefaultMaxCommandsInFlight, metrics, @@ -123,6 +125,7 @@ final class IndexerStabilityTestFixture(loggerFactory: NamedLoggerFactory) { indexerWorkerLockId = lockIdSeed + 1, ), indexServiceDbDispatcher = None, + excludedPackageIds = Set.empty, ).acquire() } yield ReadServiceAndIndexer(readService, indexing) } 
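Note on the recurring CommandProgressTracker.NoOp argument threaded through the fixtures above (InMemoryStateSpec, IndexComponentTest, InMemoryStateUpdaterSpec, RecoveringIndexerIntegrationSpec, IndexerStabilityTestFixture): the tests satisfy the new constructor parameter with a shared no-op value rather than a Mockito mock. The sketch below illustrates the null-object pattern this relies on; the ProgressTracker trait and its callback names are illustrative assumptions, not the actual CommandProgressTracker API added by this patch.

    // Hypothetical sketch of the null-object pattern used by the test fixtures.
    // `ProgressTracker` and its callbacks are illustrative names only.
    trait ProgressTracker {
      def commandStarted(commandId: String): Unit
      def commandFinished(commandId: String): Unit
    }

    object ProgressTracker {
      // Single shared instance that silently discards every notification;
      // tests that never assert on command progress can pass this directly.
      val NoOp: ProgressTracker = new ProgressTracker {
        override def commandStarted(commandId: String): Unit = ()
        override def commandFinished(commandId: String): Unit = ()
      }
    }

Because the no-op value is not a mock, it also stays out of the fixtures' ordered mock-verification lists, which is presumably why the Mockito.inOrder calls above remain unchanged.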
diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala index e53547f3b..63dec107e 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/BatchingParallelIngestionPipeSpec.scala @@ -11,6 +11,7 @@ import org.scalatest.OptionValues import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers +import java.util.concurrent.atomic.AtomicInteger import scala.collection.mutable.ArrayBuffer import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContext, Future, Promise, blocking} @@ -72,19 +73,28 @@ class BatchingParallelIngestionPipeSpec } it should "hold the stream if a single ingestion takes too long" in { + val ingestedSizeAcc = new AtomicInteger(0) + val ingestedTailAcc = new AtomicInteger(0) runPipe( inputMapperHook = () => Threading.sleep(1L), ingesterHook = batch => { // due to timing issues it can be that other than full batches are formed, so we check if the batch contains 21 + if (batch.min <= 21) { + ingestedSizeAcc.accumulateAndGet(batch.size, _ + _) + } + if (batch.max < 21) { + ingestedTailAcc.accumulateAndGet(batch.max, _ max _) + } if (batch.contains(21)) Threading.sleep(1000) }, timeout = FiniteDuration(100, "milliseconds"), ).map { case (ingested, ingestedTail, err) => err.value.getMessage shouldBe "timed out" - // 25 consists of 4 full batches before 21, then the one full batch after the frozen full batch since parallelism == 2 - // 25 is the ideal case: due to timing issues it can be that other than full batches are formed, so either having < 20 before and < 5 after is possiblef + // 25 is the ideal case: due to timing issues batches other than full ones can be formed, so having < 20 before and < 5 after is possible ingested.size should be <= 25 - ingestedTail.last shouldBe 20 + ingestedTail.last should be < 21 + ingested.size shouldBe ingestedSizeAcc.get() + ingestedTail.last shouldBe ingestedTailAcc.get() } } @@ -169,7 +179,7 @@ val semaphore = new Object var ingested: Vector[(Int, String)] = Vector.empty var ingestedTail: Vector[Int] = Vector.empty - val indexingSource: Source[Int, NotUsed] => Source[List[(Int, String)], NotUsed] = + val indexingFlow = BatchingParallelIngestionPipe[Int, List[(Int, Int)], List[(Int, String)]]( submissionBatchSize = MaxBatchSize.toLong, inputMappingParallelism = 2, @@ -217,7 +227,7 @@ val timeoutF = org.apache.pekko.pattern.after(timeout, system.scheduler) { Future.failed(new Exception("timed out")) } - val indexingF = indexingSource(inputSource).run().map { _ => + val indexingF = inputSource.via(indexingFlow).run().map { _ => (ingested, ingestedTail, Option.empty[Throwable]) } timeoutF.onComplete(p.tryComplete) diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala index 58ba1ec76..50fb89206 100644 ---
a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/parallel/ParallelIndexerSubscriptionSpec.scala @@ -250,7 +250,6 @@ class ParallelIndexerSubscriptionSpec extends AnyFlatSpec with Matchers with Nam commandId = Ref.CommandId.assertFromString("c0"), optDeduplicationPeriod = None, submissionId = None, - statistics = Some(statistics), ) val someTransactionMeta = state.TransactionMeta( ledgerEffectiveTime = Time.Timestamp.assertFromLong(2), diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/PruningDtoQueries.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/PruningDtoQueries.scala index fc8460924..135c1b841 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/PruningDtoQueries.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/PruningDtoQueries.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.platform.store.backend import anorm.SqlParser.{long, str} import anorm.{RowParser, ~} import com.digitalasset.canton.platform.store.backend.common.ComposableQuery.SqlStringInterpolation -import com.digitalasset.canton.platform.store.backend.common.SimpleSqlAsVectorOf.* +import com.digitalasset.canton.platform.store.backend.common.SimpleSqlExtensions.* import java.sql.Connection diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpec.scala index 5995da258..1024cc1a1 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/StorageBackendSpec.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.platform.store.backend -import com.daml.ledger.resources.ResourceContext import com.digitalasset.canton.BaseTest import com.digitalasset.canton.platform.store.FlywayMigrations import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, TestSuite} @@ -74,7 +73,6 @@ trait StorageBackendSpec // Note: reusing the connection pool EC for initialization implicit val ec: ExecutionContext = connectionPoolExecutionContext - implicit val resourceContext: ResourceContext = ResourceContext(ec) val dataSourceFuture = for { _ <- new FlywayMigrations(jdbcUrl, loggerFactory = loggerFactory).migrate() diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala index 3df8fd6f2..ade7dd4de 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDtoSpec.scala @@ -1663,7 +1663,6 @@ object UpdateToDbDtoSpec { commandId = someCommandId, optDeduplicationPeriod = None, submissionId = Some(someSubmissionId), - statistics = None, ) private val someDomainId1 = DomainId.tryFromString("x::domain1") private val 
someTransactionMeta = state.TransactionMeta( diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDtoSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDtoSpec.scala index 1adc74bc5..fff368a80 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDtoSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/backend/UpdateToMeteringDbDtoSpec.scala @@ -6,12 +6,15 @@ package com.digitalasset.canton.platform.store.backend import com.daml.lf.crypto.Hash import com.daml.lf.data.{ImmArray, Ref, Time} import com.daml.lf.transaction.TransactionNodeStatistics.EmptyActions +import com.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder} import com.daml.lf.transaction.{ CommittedTransaction, + NodeId, TransactionNodeStatistics, TransactionVersion, VersionedTransaction, } +import com.daml.lf.value.Value import com.daml.metrics.api.testing.{InMemoryMetricsFactory, MetricValues} import com.daml.metrics.api.{MetricName, MetricsContext} import com.digitalasset.canton.data.Offset @@ -55,7 +58,6 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { commandId = Ref.CommandId.assertFromString("c0"), optDeduplicationPeriod = None, submissionId = None, - statistics = Some(statistics), ) val someTransactionMeta = state.TransactionMeta( ledgerEffectiveTime = Time.Timestamp.assertFromLong(2), @@ -67,11 +69,36 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { optByKeyNodes = None, ) + def someContractNode = TestNodeBuilder.create( + id = TransactionBuilder.newCid, + templateId = Ref.Identifier( + Ref.PackageId.assertFromString("abc"), + Ref.QualifiedName.assertFromString("Main:Template"), + ), + argument = Value.ValueUnit, + signatories = Set.empty, + observers = Set.empty, + ) + val someConsumingExerciseNode = TestNodeBuilder.exercise( + contract = someContractNode, + choice = Ref.Name.assertFromString("somechoice"), + consuming = true, + actingParties = Set.empty, + argument = Value.ValueUnit, + byKey = false, + ) val someTransactionAccepted = state.Update.TransactionAccepted( completionInfoO = Some(someCompletionInfo), transactionMeta = someTransactionMeta, - transaction = CommittedTransaction( - VersionedTransaction(TransactionVersion.VDev, Map.empty, ImmArray.empty) + transaction = TransactionBuilder.justCommitted( + someContractNode, + someContractNode, + someConsumingExerciseNode, + TestNodeBuilder.rollback( + ImmArray( + NodeId(2) + ) + ), ), transactionId = Ref.TransactionId.assertFromString("TransactionId"), recordTime = someRecordTime, @@ -84,7 +111,9 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { "extract transaction metering" in { val actual = - UpdateToMeteringDbDto(clock = () => timestamp, IndexedUpdatesMetrics)(MetricsContext.Empty)( + UpdateToMeteringDbDto(clock = () => timestamp, Set.empty, IndexedUpdatesMetrics)( + MetricsContext.Empty + )( List((Offset.fromHexString(offset), Traced[Update](someTransactionAccepted))) ) @@ -113,7 +142,9 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { val expected: Vector[DbDto.TransactionMetering] = Vector(metering) val actual = - UpdateToMeteringDbDto(clock = () => timestamp, IndexedUpdatesMetrics)(MetricsContext.Empty)( + UpdateToMeteringDbDto(clock = () => timestamp, Set.empty, 
IndexedUpdatesMetrics)( + MetricsContext.Empty + )( List( ( Offset.fromHexString(Ref.HexString.assertFromString("01")), @@ -132,7 +163,7 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { "return empty vector if input iterable is empty" in { val expected: Vector[DbDto.TransactionMetering] = Vector.empty - val actual = UpdateToMeteringDbDto(clock = () => timestamp, IndexedUpdatesMetrics)( + val actual = UpdateToMeteringDbDto(clock = () => timestamp, Set.empty, IndexedUpdatesMetrics)( MetricsContext.Empty )(List.empty) actual should equal(expected)(decided by DbDtoSeqEq) @@ -141,12 +172,16 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { // This is so infrastructure transactions, with a zero action count, are not included "filter zero action counts" in { - val txWithNoActionCount = someTransactionAccepted.copy(completionInfoO = - Some(someCompletionInfo.copy(statistics = Some(TransactionNodeStatistics.Empty))) + val txWithNoActionCount = someTransactionAccepted.copy( + transaction = CommittedTransaction( + VersionedTransaction(TransactionVersion.VDev, Map.empty, ImmArray.empty) + ) ) val actual = - UpdateToMeteringDbDto(clock = () => timestamp, IndexedUpdatesMetrics)(MetricsContext.Empty)( + UpdateToMeteringDbDto(clock = () => timestamp, Set.empty, IndexedUpdatesMetrics)( + MetricsContext.Empty + )( List((Offset.fromHexString(offset), Traced[Update](txWithNoActionCount))) ) @@ -155,7 +190,9 @@ class UpdateToMeteringDbDtoSpec extends AnyWordSpec with MetricValues { "increment metered events counter" in { val IndexedUpdatesMetrics = newUpdateMetrics - UpdateToMeteringDbDto(clock = () => timestamp, IndexedUpdatesMetrics)(MetricsContext.Empty)( + UpdateToMeteringDbDto(clock = () => timestamp, Set.empty, IndexedUpdatesMetrics)( + MetricsContext.Empty + )( List((Offset.fromHexString(offset), Traced[Update](someTransactionAccepted))) ) IndexedUpdatesMetrics.meteredEventsMeter.value shouldBe (statistics.committed.actions + statistics.rolledBack.actions) diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala index 0adfc3e15..1183a3d76 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/cache/InMemoryFanoutBufferSpec.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.platform.store.cache.InMemoryFanoutBuffer.{ } import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate.CompletionDetails +import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.Traced import org.scalatest.Succeeded import org.scalatest.compatible.Assertion @@ -37,6 +38,7 @@ class InMemoryFanoutBufferSpec private val offsetIdx = Vector(2, 4, 6, 8, 10) private val BeginOffset = offset(0L) private val offsets = offsetIdx.map(i => offset(i.toLong)) + private val someDomainId = DomainId.tryFromString("some::domain-id") private val IdentityFilter: Traced[TransactionLogUpdate] => Option[TransactionLogUpdate] = tracedUpdate => Some(tracedUpdate.value) @@ -499,7 +501,7 @@ class InMemoryFanoutBufferSpec events = Vector.empty, completionDetails = None, commandId = "", - domainId = None, + 
domainId = someDomainId.toProtoPrimitive, recordTime = Time.Timestamp.Epoch, ) diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala index ff4fa88e5..1ca8c3ae9 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedStreamsReaderSpec.scala @@ -12,6 +12,7 @@ import com.digitalasset.canton.platform.store.cache.InMemoryFanoutBuffer import com.digitalasset.canton.platform.store.dao.BufferedStreamsReader.FetchFromPersistence import com.digitalasset.canton.platform.store.dao.BufferedStreamsReaderSpec.* import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate +import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.Traced import com.digitalasset.canton.{BaseTest, HasExecutionContext, HasExecutorServiceGeneric} import org.apache.pekko.stream.scaladsl.{Sink, Source} @@ -465,6 +466,8 @@ object BufferedStreamsReaderSpec { } } + private val someDomainId = DomainId.tryFromString("some::domain-id") + private def transaction(discriminator: String) = TransactionLogUpdate.TransactionAccepted( transactionId = discriminator, @@ -474,7 +477,7 @@ object BufferedStreamsReaderSpec { offset = Offset.beforeBegin, events = Vector(null), completionDetails = None, - domainId = None, + domainId = someDomainId.toProtoPrimitive, recordTime = Timestamp.Epoch, ) diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionByIdReaderSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionByIdReaderSpec.scala index 858611541..03a9b37f1 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionByIdReaderSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/BufferedTransactionByIdReaderSpec.scala @@ -15,6 +15,7 @@ import com.digitalasset.canton.platform.store.dao.BufferedTransactionByIdReader. 
ToApiResponse, } import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate +import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.Traced import org.mockito.MockitoSugar import org.scalatest.flatspec.AsyncFlatSpec @@ -29,6 +30,7 @@ class BufferedTransactionByIdReaderSpec extends AsyncFlatSpec with MockitoSugar ) private val requestingParties = Set("p1", "p2").map(Ref.Party.assertFromString) + private val someDomainId = DomainId.tryFromString("some::domain-id") private val bufferedTransactionId1 = "bufferedTid_1" private val bufferedTransactionId2 = "bufferedTid_2" @@ -103,7 +105,7 @@ class BufferedTransactionByIdReaderSpec extends AsyncFlatSpec with MockitoSugar offset = Offset.beforeBegin, events = Vector(null), completionDetails = None, - domainId = None, + domainId = someDomainId.toProtoPrimitive, recordTime = Timestamp.Epoch, ) } diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala index 4b8a19d64..d63bcac16 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala @@ -91,7 +91,10 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base _ <- new ResourceOwner[Unit] { override def acquire()(implicit context: ResourceContext): Resource[Unit] = PureResource( - new FlywayMigrations(dbConfig.jdbcUrl, loggerFactory = loggerFactory).migrate() + new FlywayMigrations(dbConfig.jdbcUrl, loggerFactory = loggerFactory)( + ec, + traceContext, + ).migrate() ) } dbSupport <- DbSupport.owner( diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala index 6063d85f9..df9e6d2d4 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoCompletionsSpec.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.platform.store.dao import com.daml.ledger.api.v2.command_completion_service.CompletionStreamResponse import com.daml.lf.data.Ref import com.daml.lf.data.Time.Timestamp -import com.daml.lf.transaction.TransactionNodeStatistics import com.digitalasset.canton.data.Offset import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.platform.ApiOffset @@ -242,7 +241,6 @@ private[dao] trait JdbcLedgerDaoCompletionsSpec extends OptionValues with LoneEl commandId = commandId, optDeduplicationPeriod = None, submissionId = Some(submissionId), - statistics = Some(statistics), ) ), recordTime = Timestamp.now(), @@ -267,7 +265,6 @@ private[dao] trait JdbcLedgerDaoCompletionsSpec extends OptionValues with LoneEl commandId = commandId, optDeduplicationPeriod = None, submissionId = Some(submissionId), - statistics = Some(statistics), ) ), recordTime = Timestamp.now(), @@ -285,7 +282,6 @@ private[dao] object JdbcLedgerDaoCompletionsSpec { private val party2 = Ref.Party.assertFromString("JdbcLedgerDaoCompletionsSpec2") private val party3 = 
Ref.Party.assertFromString("JdbcLedgerDaoCompletionsSpec3") private val parties = Set(party1, party2, party3) - private val statistics = TransactionNodeStatistics.Empty @SuppressWarnings(Array("org.wartremover.warts.OptionPartial")) private def offsetOf(response: CompletionStreamResponse): Offset = diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala index 3d0fc12f5..dd6f2b80d 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoSuite.scala @@ -41,17 +41,12 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa new AtomicReference[Option[Offset]](Option.empty) protected final val nextOffset: () => Offset = { - val base = BigInt(1) << 32 - val counter = new AtomicLong(0) + val counter = new AtomicLong(1) () => { - Offset.fromByteArray((base + counter.getAndIncrement()).toByteArray) + Offset.fromLong(counter.getAndIncrement()) } } - protected final implicit class OffsetToLong(offset: Offset) { - def toLong: Long = BigInt(offset.toByteArray).toLong - } - private[this] lazy val dar = TestModels.com_daml_ledger_test_ModelTestDar_path .pipe(JarResourceUtils.resourceFileFromJar) @@ -642,7 +637,6 @@ private[dao] trait JdbcLedgerDaoSuite extends JdbcLedgerDaoBackend with OptionVa commandId, None, Some(submissionId), - Some(TransactionNodeStatistics(entry.transaction)), ) protected final def store( diff --git a/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala b/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala index ff567ebe8..1ce4f4016 100644 --- a/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala +++ b/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala @@ -19,6 +19,7 @@ import com.digitalasset.canton.ledger.participant.state.{ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.{LedgerApiServerHistograms, LedgerApiServerMetrics} import com.digitalasset.canton.platform.LedgerApiServer +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.indexer.ha.HaConfig import com.digitalasset.canton.platform.indexer.{Indexer, IndexerServiceOwner, JdbcIndexer} import com.digitalasset.canton.platform.store.DbSupport.DataSourceProperties @@ -74,6 +75,7 @@ class IndexerBenchmark extends NamedLogging { (inMemoryState, inMemoryStateUpdaterFlow) <- LedgerApiServer .createInMemoryStateAndUpdater( + CommandProgressTracker.NoOp, config.indexServiceConfig, 256, metrics, @@ -86,6 +88,7 @@ class IndexerBenchmark extends NamedLogging { config.participantId, config.dataSource, config.indexerConfig, + Set.empty, readService, metrics, inMemoryState, @@ -143,7 +146,7 @@ class IndexerBenchmark extends NamedLogging { Await .result( IndexerServiceOwner - .migrateOnly(config.dataSource.jdbcUrl, loggerFactory) + .migrateOnly(config.dataSource.jdbcUrl, loggerFactory)(rc.executionContext, traceContext) .map(_ => 
indexerFactory.initialized(logger))(indexerExecutionContext), Duration(5, "minute"), ) diff --git a/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml index ca939ddae..f3002b1c9 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --enable-interfaces=yes name: carbonv1-tests diff --git a/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml index f98b1b05a..60884599c 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --enable-interfaces=yes name: carbonv2-tests diff --git a/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml index ec563c73a..fe87dedcb 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 name: experimental-tests source: . version: 3.1.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml index a47827095..873943dc5 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --enable-interfaces=yes name: model-tests diff --git a/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml index a8f6d8b09..ab37edfec 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 name: package-management-tests source: . 
version: 3.1.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/semantic/DivulgenceTests.daml b/community/ledger/ledger-common-dars/src/main/daml/semantic/DivulgenceTests.daml index 1a5f5c842..ddc6141c0 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/semantic/DivulgenceTests.daml +++ b/community/ledger/ledger-common-dars/src/main/daml/semantic/DivulgenceTests.daml @@ -68,3 +68,16 @@ template Dummy holder: Party where signatory holder + +template DummyFlexibleController + with + holder: Party + where + signatory holder + + choice FlexibleConsume: () + with + actor: Party + controller actor + do + pure () diff --git a/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml index 384daf09d..d7c7bc6c7 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --enable-interfaces=yes name: semantic-tests diff --git a/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml index 125d225f0..dfc6ab919 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 name: upgrade-tests source: . version: 1.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml index 48100167a..b750f4f66 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 name: upgrade-tests source: . version: 2.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml index 1cb78b932..be8913fa6 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 name: upgrade-tests source: . 
version: 3.0.0 diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json2/JsVersionService.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json2/JsVersionService.scala index 9262d58a5..bc51d74b6 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json2/JsVersionService.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json2/JsVersionService.scala @@ -38,6 +38,7 @@ class JsVersionService(versionClient: VersionClient)(implicit object JsVersionServiceCodecs { implicit val est: Codec[experimental_features.ExperimentalStaticTime] = deriveCodec + implicit val ecis: Codec[experimental_features.ExperimentalCommandInspectionService] = deriveCodec implicit val ef: Codec[experimental_features.ExperimentalFeatures] = deriveCodec implicit val umf: Codec[version_service.UserManagementFeature] = deriveCodec implicit val pmf: Codec[version_service.PartyManagementFeature] = deriveCodec diff --git a/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml b/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml index 703b42148..194f2a551 100644 --- a/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml +++ b/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: JsonEncodingTest diff --git a/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml b/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml index 3d971885b..975981d4a 100644 --- a/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml +++ b/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.dev name: JsonEncodingTestDev diff --git a/community/participant/src/main/daml/daml.yaml b/community/participant/src/main/daml/daml.yaml index ce84b2754..593d57c03 100644 --- a/community/participant/src/main/daml/daml.yaml +++ b/community/participant/src/main/daml/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.1.0-snapshot.20240613.13124.0.v24e0f5e8 +sdk-version: 3.1.0-snapshot.20240624.13145.0.v551f7a20 build-options: - --target=2.1 name: AdminWorkflows diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/CantonLedgerApiServerFactory.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/CantonLedgerApiServerFactory.scala index 585e5e9db..393632c68 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/CantonLedgerApiServerFactory.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/CantonLedgerApiServerFactory.scala @@ -7,7 +7,6 @@ import cats.Eval import cats.data.EitherT import cats.syntax.either.* import com.daml.lf.engine.Engine -import com.digitalasset.canton.LedgerParticipantId import com.digitalasset.canton.concurrent.{ ExecutionContextIdlenessExecutorService, FutureSupervisor, @@ -27,6 +26,7 @@ import com.digitalasset.canton.platform.apiserver.meteringreport.MeteringReportK import com.digitalasset.canton.platform.indexer.ha.HaConfig import com.digitalasset.canton.time.* import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} +import com.digitalasset.canton.{LedgerParticipantId, LfPackageId} 
import org.apache.pekko.actor.ActorSystem class CantonLedgerApiServerFactory( @@ -49,6 +49,7 @@ class CantonLedgerApiServerFactory( httpApiMetrics: HttpApiMetrics, tracerProvider: TracerProvider, adminToken: CantonAdminToken, + excludedPackageIds: Set[LfPackageId], )(implicit executionContext: ExecutionContextIdlenessExecutorService, traceContext: TraceContext, @@ -110,6 +111,7 @@ class CantonLedgerApiServerFactory( cantonParameterConfig = parameters, testingTimeService = ledgerTestingTimeService, adminToken = adminToken, + enableCommandInspection = config.ledgerApi.enableCommandInspection, loggerFactory = loggerFactory, tracerProvider = tracerProvider, metrics = metrics, @@ -127,6 +129,7 @@ class CantonLedgerApiServerFactory( startLedgerApiServer = sync.isActive(), futureSupervisor = futureSupervisor, parameters = parameters, + excludedPackageIds = excludedPackageIds, )(executionContext, actorSystem) .leftMap { err => // The MigrateOnEmptySchema exception is private, thus match on the expected message diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/GlobalOffset.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/GlobalOffset.scala index 61adc51bc..d5d0ae881 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/GlobalOffset.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/GlobalOffset.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant import cats.syntax.either.* import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveLong} +import com.digitalasset.canton.data.Offset import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import slick.jdbc.{GetResult, SetParameter} @@ -29,6 +30,8 @@ final case class GlobalOffset(v: PositiveLong) extends Ordered[GlobalOffset] wit def +(i: PositiveLong): GlobalOffset = new GlobalOffset(v + i) def +(i: NonNegativeInt): GlobalOffset = new GlobalOffset(v.tryAdd(i.unwrap.toLong)) + + def toLedgerOffset: Offset = Offset.fromLong(v.value) } object GlobalOffset { @@ -49,4 +52,11 @@ object GlobalOffset { def fromLong(i: Long): Either[String, GlobalOffset] = PositiveLong .create(i) .bimap(_ => s"Expecting positive value for global offset; found $i", GlobalOffset(_)) + + def tryFromLedgerOffset(offset: Offset): GlobalOffset = + if (offset == Offset.beforeBegin) + throw new IllegalArgumentException( + "offset expected to be an explicit offset, not before-begin" + ) + else tryFromLong(offset.toLong) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala index e115d5281..dd9b1d783 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala @@ -10,6 +10,7 @@ import cats.syntax.option.* import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.lf.engine.Engine import com.daml.nameof.NameOf.functionFullName +import com.digitalasset.canton.LfPackageId import com.digitalasset.canton.admin.participant.v30.* import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService @@ -34,6 +35,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import 
com.digitalasset.canton.networking.grpc.{CantonGrpcUtil, StaticGrpcServices} import com.digitalasset.canton.participant.admin.* import com.digitalasset.canton.participant.admin.grpc.* +import com.digitalasset.canton.participant.admin.workflows.java.canton import com.digitalasset.canton.participant.config.* import com.digitalasset.canton.participant.domain.grpc.GrpcDomainRegistry import com.digitalasset.canton.participant.domain.{DomainAliasManager, DomainAliasResolution} @@ -69,6 +71,7 @@ import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.time.EnrichedDurations.* import com.digitalasset.canton.time.* import com.digitalasset.canton.time.admin.v30.DomainTimeServiceGrpc +import com.digitalasset.canton.topology.TopologyManagerError.InvalidTopologyMapping import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.{ DomainTopologyClient, @@ -254,21 +257,30 @@ class ParticipantNodeBootstrap( protocolVersion: ProtocolVersion, )(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Unit] = { + ): EitherT[FutureUnlessShutdown, ParticipantTopologyManagerError, Unit] = for { + ptp <- EitherT.fromEither[FutureUnlessShutdown]( + PartyToParticipant + .create( + partyId, + None, + threshold = PositiveInt.one, + participants = + Seq(HostingParticipant(participantId, ParticipantPermission.Submission)), + groupAddressing = false, + ) + .leftMap(err => + ParticipantTopologyManagerError.IdentityManagerParentError( + InvalidTopologyMapping.Reject(err) + ) + ) + ) // TODO(#14069) make this "extend" / not replace // this will also be potentially racy! - performUnlessClosingEitherUSF(functionFullName)( + _ <- performUnlessClosingEitherUSF(functionFullName)( topologyManager .proposeAndAuthorize( TopologyChangeOp.Replace, - PartyToParticipant( - partyId, - None, - threshold = PositiveInt.one, - participants = - Seq(HostingParticipant(participantId, ParticipantPermission.Submission)), - groupAddressing = false, - ), + ptp, serial = None, // TODO(#12390) auto-determine signing keys signingKeys = Seq(partyId.uid.namespace.fingerprint), @@ -278,7 +290,7 @@ class ParticipantNodeBootstrap( ) .leftMap(IdentityManagerParentError(_): ParticipantTopologyManagerError) .map(_ => ()) - } + } yield () } @@ -543,6 +555,20 @@ class ParticipantNodeBootstrap( ) ) + excludedPackageIds = + if (parameters.excludeInfrastructureTransactions) { + Set( + canton.internal.ping.Ping.TEMPLATE_ID, + canton.internal.bong.BongProposal.TEMPLATE_ID, + canton.internal.bong.Bong.TEMPLATE_ID, + canton.internal.bong.Merge.TEMPLATE_ID, + canton.internal.bong.Explode.TEMPLATE_ID, + canton.internal.bong.Collapse.TEMPLATE_ID, + ).map(x => LfPackageId.assertFromString(x.getPackageId)) + } else { + Set.empty[LfPackageId] + } + ephemeralState = ParticipantNodeEphemeralState( participantId, persistentState, @@ -690,6 +716,7 @@ class ParticipantNodeBootstrap( arguments.metrics.httpApiServer, tracerProvider, adminToken, + excludedPackageIds, ) } yield { @@ -853,6 +880,7 @@ object ParticipantNodeBootstrap { override protected def createEngine(arguments: Arguments): Engine = DAMLe.newEngine( enableLfDev = arguments.parameterConfig.devVersionSupport, + enableLfBeta = arguments.parameterConfig.betaVersionSupport, enableStackTraces = arguments.parameterConfig.engine.enableEngineStackTraces, iterationsBetweenInterruptions = arguments.parameterConfig.engine.iterationsBetweenInterruptions, diff --git 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala index 0e2b4296d..0f3513d5b 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeParameters.scala @@ -21,6 +21,7 @@ import com.digitalasset.canton.participant.config.{ ParticipantStoreConfig, PartyNotificationConfig, } +import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig import com.digitalasset.canton.sequencing.client.SequencerClientConfig import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.tracing.TracingConfig @@ -40,10 +41,12 @@ final case class ParticipantNodeParameters( journalGarbageCollectionDelay: NonNegativeFiniteDuration, disableUpgradeValidation: Boolean, allowForUnauthenticatedContractIds: Boolean, + commandProgressTracking: CommandProgressTrackerConfig, ) extends CantonNodeParameters with HasGeneralCantonNodeParameters { override def dontWarnOnDeprecatedPV: Boolean = protocolConfig.dontWarnOnDeprecatedPV override def devVersionSupport: Boolean = protocolConfig.devVersionSupport + override def betaVersionSupport: Boolean = protocolConfig.betaVersionSupport } object ParticipantNodeParameters { @@ -80,6 +83,7 @@ object ParticipantNodeParameters { Some(testedProtocolVersion), // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version devVersionSupport = true, + betaVersionSupport = true, dontWarnOnDeprecatedPV = false, ), ledgerApiServerParameters = LedgerApiServerParametersConfig(), @@ -88,5 +92,6 @@ object ParticipantNodeParameters { journalGarbageCollectionDelay = NonNegativeFiniteDuration.Zero, disableUpgradeValidation = false, allowForUnauthenticatedContractIds = false, + commandProgressTracking = CommandProgressTrackerConfig(), ) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala index 476c97302..0e0a95466 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.admin.grpc import cats.syntax.either.* import cats.syntax.parallel.* -import com.digitalasset.canton.LedgerTransactionId import com.digitalasset.canton.admin.participant.v30.InspectionServiceGrpc.InspectionService import com.digitalasset.canton.admin.participant.v30.{ GetConfigForSlowCounterParticipants, @@ -15,7 +14,6 @@ import com.digitalasset.canton.admin.participant.v30.{ LookupOffsetByTime, LookupReceivedAcsCommitments, LookupSentAcsCommitments, - LookupTransactionDomain, SetConfigForSlowCounterParticipants, } import com.digitalasset.canton.data.CantonTimestamp @@ -25,6 +23,7 @@ import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.FutureInstances.* import io.grpc.{Status, StatusRuntimeException} +import scala.annotation.nowarn import scala.concurrent.{ExecutionContext, Future} class GrpcInspectionService(syncStateInspection: SyncStateInspection)(implicit @@ -56,26 +55,6 @@ 
class GrpcInspectionService(syncStateInspection: SyncStateInspection)(implicit } } - override def lookupTransactionDomain( - request: LookupTransactionDomain.Request - ): Future[LookupTransactionDomain.Response] = { - implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - LedgerTransactionId.fromString(request.transactionId) match { - case Left(err) => - Future.failed( - new IllegalArgumentException( - s"""String "${request.transactionId}" doesn't parse as a transaction ID: $err""" - ) - ) - case Right(txId) => - syncStateInspection.lookupTransactionDomain(txId).map { domainId => - LookupTransactionDomain.Response( - domainId.fold(throw new StatusRuntimeException(Status.NOT_FOUND))(_.toProtoPrimitive) - ) - } - } - } - override def lookupOffsetByTime( request: LookupOffsetByTime.Request ): Future[LookupOffsetByTime.Response] = { @@ -94,6 +73,7 @@ class GrpcInspectionService(syncStateInspection: SyncStateInspection)(implicit } } + @nowarn("msg=usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer") override def lookupOffsetByIndex( request: LookupOffsetByIndex.Request ): Future[LookupOffsetByIndex.Response] = { diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala index c1ebcca08..5005b3a12 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala @@ -49,13 +49,7 @@ import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.canton.util.{EitherTUtil, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{ - DomainAlias, - LedgerTransactionId, - LfPartyId, - RequestCounter, - TransferCounter, -} +import com.digitalasset.canton.{DomainAlias, LfPartyId, RequestCounter, TransferCounter} import java.io.OutputStream import java.time.Instant @@ -115,13 +109,6 @@ final class SyncStateInspection( .map(_.flatten.toMap) } - def lookupTransactionDomain(transactionId: LedgerTransactionId)(implicit - traceContext: TraceContext - ): Future[Option[DomainId]] = - participantNodePersistentState.value.multiDomainEventLog - .lookupTransactionDomain(transactionId) - .value - /** returns the potentially big ACS of a given domain */ def findAcs( domainAlias: DomainAlias @@ -295,6 +282,10 @@ final class SyncStateInspection( .findM { case (_, store) => AcsInspection.hasActiveContracts(store, partyId) } .map(_.nonEmpty) + @deprecated( + "usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer", + "3.1", + ) def findAcceptedTransactions( domain: Option[DomainAlias] = None, from: Option[CantonTimestamp] = None, @@ -316,6 +307,10 @@ final class SyncStateInspection( * multi-domain event log. `from` and `to` only have an effect if the domain isn't empty. 
* @throws scala.RuntimeException (by Await.result and if lookup fails) */ + @deprecated( + "usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer", + "3.1", + ) def findEvents( domain: Option[DomainAlias] = None, from: Option[CantonTimestamp] = None, @@ -460,6 +455,10 @@ final class SyncStateInspection( } /** Update the prehead for clean requests to the given value, bypassing all checks. Only used for testing. */ + @deprecated( + "usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer", + "3.1", + ) def forceCleanPrehead( newHead: Option[RequestCounterCursorPrehead], domain: DomainAlias, @@ -471,6 +470,10 @@ final class SyncStateInspection( .toRight(s"Unknown domain $domain") } + @deprecated( + "usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer", + "3.1", + ) def forceCleanSequencerCounterPrehead( newHead: Option[SequencerCounterCursorPrehead], domain: DomainAlias, @@ -497,6 +500,10 @@ final class SyncStateInspection( private[this] def getPersistentState(domain: DomainAlias): Option[SyncDomainPersistentState] = syncDomainPersistentStateManager.getByAlias(domain) + @deprecated( + "usage being removed as part of fusing MultiDomainEventLog and Ledger API Indexer", + "3.1", + ) def locateOffset( numTransactions: Long )(implicit traceContext: TraceContext): Future[Either[String, ParticipantOffset]] = { diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala index 81034dea3..b53922257 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala @@ -14,6 +14,7 @@ import com.digitalasset.canton.http.HttpApiConfig import com.digitalasset.canton.networking.grpc.CantonServerBuilder import com.digitalasset.canton.participant.admin.AdminWorkflowConfig import com.digitalasset.canton.participant.config.LedgerApiServerConfig.DefaultRateLimit +import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig import com.digitalasset.canton.platform.apiserver.ApiServiceOwner import com.digitalasset.canton.platform.apiserver.SeedService.Seeding import com.digitalasset.canton.platform.apiserver.configuration.RateLimitingConfig @@ -95,6 +96,7 @@ object PartyNotificationConfig { final case class ParticipantProtocolConfig( minimumProtocolVersion: Option[ProtocolVersion], override val devVersionSupport: Boolean, + override val betaVersionSupport: Boolean, override val dontWarnOnDeprecatedPV: Boolean, ) extends ProtocolConfig @@ -170,6 +172,8 @@ final case class RemoteParticipantConfig( * @param databaseConnectionTimeout database connection timeout * @param additionalMigrationPaths optional extra paths for the database migrations * @param rateLimit limit the ledger api server request rates based on system metrics + * @param enableExplicitDisclosure enable usage of explicitly disclosed contracts in command submission and transaction validation. 
+ * @param enableCommandInspection enable the command inspection service over the ledger api */ final case class LedgerApiServerConfig( address: String = "127.0.0.1", @@ -190,6 +194,7 @@ final case class LedgerApiServerConfig( databaseConnectionTimeout: config.NonNegativeFiniteDuration = LedgerApiServerConfig.DefaultDatabaseConnectionTimeout, rateLimit: Option[RateLimitingConfig] = Some(DefaultRateLimit), + enableCommandInspection: Boolean = true, adminToken: Option[String] = None, identityProviderManagement: IdentityProviderManagementConfig = LedgerApiServerConfig.DefaultIdentityProviderManagementConfig, @@ -353,6 +358,7 @@ final case class ParticipantNodeParameterConfig( ), // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version devVersionSupport: Boolean = true, + betaVersionSupport: Boolean = false, dontWarnOnDeprecatedPV: Boolean = false, warnIfOverloadedFor: Option[config.NonNegativeFiniteDuration] = Some( config.NonNegativeFiniteDuration.ofSeconds(20) @@ -367,6 +373,7 @@ final case class ParticipantNodeParameterConfig( allowForUnauthenticatedContractIds: Boolean = false, watchdog: Option[WatchdogConfig] = None, packageMetadataView: PackageMetadataViewConfig = PackageMetadataViewConfig(), + commandProgressTracker: CommandProgressTrackerConfig = CommandProgressTrackerConfig(), ) extends LocalNodeParametersConfig /** Parameters for the participant node's stores diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala index f3f5f988e..a1a462ba7 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala @@ -7,7 +7,6 @@ import cats.data.EitherT import cats.syntax.either.* import com.daml.lf.engine.Engine import com.daml.tracing.DefaultOpenTelemetry -import com.digitalasset.canton.LedgerParticipantId import com.digitalasset.canton.concurrent.{ ExecutionContextIdlenessExecutorService, FutureSupervisor, @@ -28,6 +27,7 @@ import com.digitalasset.canton.platform.indexer.IndexerConfig import com.digitalasset.canton.platform.indexer.ha.HaConfig import com.digitalasset.canton.platform.store.DbSupport import com.digitalasset.canton.tracing.{NoTracing, TracerProvider} +import com.digitalasset.canton.{LedgerParticipantId, LfPackageId} import org.apache.pekko.actor.ActorSystem import scala.util.{Failure, Success} @@ -39,21 +39,21 @@ object CantonLedgerApiServerWrapper extends NoTracing { /** Config for ledger API server and indexer * - * @param serverConfig ledger API server configuration - * @param jsonApiConfig JSON API configuration - * @param indexerConfig indexer configuration - * @param indexerLockIds Optional lock IDs to be used for indexer HA - * @param participantId unique participant id used e.g.
for a unique ledger API server index db name - * @param engine daml engine shared with Canton for performance reasons - * @param syncService canton sync service implementing both read and write services - * @param storageConfig canton storage config so that indexer can share the participant db - * @param cantonParameterConfig configurations meant to be overridden primarily in tests (applying to all participants) - * @param testingTimeService an optional service during testing for advancing time, participant-specific - * @param adminToken canton admin token for ledger api auth - * @param loggerFactory canton logger factory - * @param tracerProvider tracer provider for open telemetry grpc injection - * @param metrics upstream metrics module - * @param maxDeduplicationDuration maximum time window during which commands can be deduplicated. + * @param serverConfig ledger API server configuration + * @param jsonApiConfig JSON API configuration + * @param indexerConfig indexer configuration + * @param indexerHaConfig configuration for indexer HA + * @param participantId unique participant id used e.g. for a unique ledger API server index db name + * @param engine daml engine shared with Canton for performance reasons + * @param syncService canton sync service implementing both read and write services + * @param storageConfig canton storage config so that indexer can share the participant db + * @param cantonParameterConfig configurations meant to be overridden primarily in tests (applying to all participants) + * @param testingTimeService an optional service during testing for advancing time, participant-specific + * @param adminToken canton admin token for ledger api auth + * @param enableCommandInspection whether canton should expose the command inspection service + * @param loggerFactory canton logger factory + * @param tracerProvider tracer provider for open telemetry grpc injection + * @param metrics upstream metrics module */ final case class Config( serverConfig: LedgerApiServerConfig, @@ -67,6 +67,7 @@ object CantonLedgerApiServerWrapper extends NoTracing { cantonParameterConfig: ParticipantNodeParameters, testingTimeService: Option[TimeServiceBackend], adminToken: CantonAdminToken, + enableCommandInspection: Boolean, override val loggerFactory: NamedLoggerFactory, tracerProvider: TracerProvider, metrics: LedgerApiServerMetrics, @@ -90,6 +91,7 @@ object CantonLedgerApiServerWrapper extends NoTracing { parameters: ParticipantNodeParameters, startLedgerApiServer: Boolean, futureSupervisor: FutureSupervisor, + excludedPackageIds: Set[LfPackageId], )(implicit ec: ExecutionContextIdlenessExecutorService, actorSystem: ActorSystem, @@ -127,6 +129,8 @@ object CantonLedgerApiServerWrapper extends NoTracing { telemetry = new DefaultOpenTelemetry(config.tracerProvider.openTelemetry), futureSupervisor = futureSupervisor, parameters = parameters, + commandProgressTracker = config.syncService.commandProgressTracker, + excludedPackageIds = excludedPackageIds, ) val startFUS = for { _ <- diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala index 8763947ba..ef941c006 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala +++ 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala @@ -4,12 +4,12 @@ package com.digitalasset.canton.participant.ledger.api import com.daml.executors.executors.{NamedExecutor, QueueAwareExecutor} +import com.daml.ledger.api.v2.experimental_features.ExperimentalCommandInspectionService import com.daml.ledger.api.v2.state_service.GetActiveContractsResponse import com.daml.ledger.resources.{Resource, ResourceContext, ResourceOwner} import com.daml.logging.entries.LoggingEntries import com.daml.nameof.NameOf.functionFullName import com.daml.tracing.Telemetry -import com.digitalasset.canton.LfPartyId import com.digitalasset.canton.concurrent.{ ExecutionContextIdlenessExecutorService, FutureSupervisor, @@ -42,6 +42,7 @@ import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.config.LedgerApiServerConfig import com.digitalasset.canton.participant.protocol.SerializableContractAuthenticator import com.digitalasset.canton.platform.LedgerApiServer +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.platform.apiserver.execution.StoreBackedCommandExecutor.AuthenticateContract import com.digitalasset.canton.platform.apiserver.ratelimiting.{ RateLimitingInterceptor, @@ -60,6 +61,7 @@ import com.digitalasset.canton.platform.store.DbSupport.ParticipantDataSourceCon import com.digitalasset.canton.platform.store.dao.events.{ContractLoader, LfValueTranslation} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{FutureUtil, SimpleExecutionQueue} +import com.digitalasset.canton.{LfPackageId, LfPartyId} import io.grpc.{BindableService, ServerInterceptor, ServerServiceDefinition} import io.opentelemetry.api.trace.Tracer import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTelemetry @@ -85,6 +87,8 @@ class StartableStoppableLedgerApiServer( telemetry: Telemetry, futureSupervisor: FutureSupervisor, parameters: ParticipantNodeParameters, + commandProgressTracker: CommandProgressTracker, + excludedPackageIds: Set[LfPackageId], )(implicit executionContext: ExecutionContextIdlenessExecutorService, actorSystem: ActorSystem, @@ -210,6 +214,7 @@ class StartableStoppableLedgerApiServer( for { (inMemoryState, inMemoryStateUpdaterFlow) <- LedgerApiServer.createInMemoryStateAndUpdater( + commandProgressTracker, indexServiceConfig, config.serverConfig.commandService.maxCommandsInFlight, config.metrics, @@ -217,6 +222,7 @@ class StartableStoppableLedgerApiServer( tracer, loggerFactory, ) + timedWriteService = new TimedWriteService(config.syncService, config.metrics) timedReadService = new TimedReadService(config.syncService, config.metrics) dbSupport <- DbSupport .owner( @@ -247,6 +253,7 @@ class StartableStoppableLedgerApiServer( ), highAvailability = config.indexerHaConfig, indexServiceDbDispatcher = Some(dbSupport.dbDispatcher), + excludedPackageIds, ) contractLoader <- { import config.cantonParameterConfig.ledgerApiServerParameters.contractLoader.* @@ -274,18 +281,18 @@ class StartableStoppableLedgerApiServer( tracer = config.tracerProvider.tracer, loggerFactory = loggerFactory, incompleteOffsets = (off, ps, tc) => - timedReadService.incompleteReassignmentOffsets(off, ps.getOrElse(Set.empty))(tc), + timedWriteService.incompleteReassignmentOffsets(off, ps.getOrElse(Set.empty))(tc), contractLoader = contractLoader, - getPackageMetadataSnapshot = timedReadService.getPackageMetadataSnapshot(_), + 
getPackageMetadataSnapshot = timedWriteService.getPackageMetadataSnapshot(_), lfValueTranslation = new LfValueTranslation( metrics = config.metrics, engineO = Some(config.engine), loadPackage = (packageId, loggingContext) => - timedReadService.getLfArchive(packageId)(loggingContext.traceContext), + timedWriteService.getLfArchive(packageId)(loggingContext.traceContext), loggerFactory = loggerFactory, ), ) - _ = timedReadService.registerInternalStateService(new InternalStateService { + _ = timedWriteService.registerInternalStateService(new InternalStateService { override def activeContracts( partyIds: Set[LfPartyId], validAt: Option[Offset], @@ -316,10 +323,10 @@ class StartableStoppableLedgerApiServer( authenticateContract: AuthenticateContract = c => serializableContractAuthenticator.authenticate(c) - timedWriteService = new TimedWriteService(config.syncService, config.metrics) _ <- ApiServiceOwner( - submissionTracker = inMemoryState.submissionTracker, indexService = indexService, + submissionTracker = inMemoryState.submissionTracker, + commandProgressTracker = commandProgressTracker, userManagementStore = userManagementStore, identityProviderConfigStore = getIdentityProviderConfigStore( dbSupport, @@ -339,8 +346,7 @@ class StartableStoppableLedgerApiServer( maxInboundMessageSize = config.serverConfig.maxInboundMessageSize.unwrap, port = config.serverConfig.port, seeding = config.cantonParameterConfig.ledgerApiServerParameters.contractIdSeeding, - optWriteService = Some(timedWriteService), - readService = timedReadService, + writeService = timedWriteService, healthChecks = new HealthChecks( "read" -> timedReadService, "write" -> (() => config.syncService.currentWriteHealth()), @@ -444,7 +450,9 @@ class StartableStoppableLedgerApiServer( .toList private def getLedgerFeatures: LedgerFeatures = LedgerFeatures( - staticTime = config.testingTimeService.isDefined + staticTime = config.testingTimeService.isDefined, + commandInspectionService = + ExperimentalCommandInspectionService.of(supported = config.enableCommandInspection), ) private def startHttpApiIfEnabled: ResourceOwner[Unit] = diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala index 11bad0c32..ae07654cd 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala @@ -426,7 +426,10 @@ trait MessageDispatcher { this: NamedLogging => ) } yield goodRequest - /** Return only the root hash messages sent to a mediator, along with the set of all mediator recipients */ + /** Return only the root hash messages sent to a mediator, along with the mediator group recipient. + * The mediator group recipient can be `None` if there is no root hash message sent to a mediator group. + * @throws IllegalArgumentException if there are root hash messages that address more than one mediator group. 
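+   * (for instance, one root hash message addressed to mediator group 0 and another addressed to mediator group 1)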
+ */ private def filterRootHashMessagesToMediator( rootHashMessages: List[OpenEnvelope[RootHashMessage[SerializedRootHashMessagePayload]]], encryptedViews: List[OpenEnvelope[EncryptedViewMessage[ViewType]]], diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala index 50d837a77..ab18eb1fb 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala @@ -70,6 +70,7 @@ import com.digitalasset.canton.participant.protocol.validation.* import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceAlarm import com.digitalasset.canton.participant.sync.* +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.protocol.WellFormedTransaction.{ WithSuffixesAndMerged, WithoutSuffixes, @@ -123,6 +124,7 @@ class TransactionProcessingSteps( authenticationValidator: AuthenticationValidator, authorizationValidator: AuthorizationValidator, internalConsistencyChecker: InternalConsistencyChecker, + tracker: CommandProgressTracker, protected val loggerFactory: NamedLoggerFactory, futureSupervisor: FutureSupervisor, )(implicit val ec: ExecutionContext) @@ -276,7 +278,7 @@ class TransactionProcessingSteps( ) -> emptyDeduplicationPeriod } TransactionSubmissionTrackingData( - submitterInfo.toCompletionInfo().copy(optDeduplicationPeriod = dedupInfo.some), + submitterInfo.toCompletionInfo.copy(optDeduplicationPeriod = dedupInfo.some), TransactionSubmissionTrackingData.CauseWithTemplate(error), domainId, protocolVersion, @@ -384,12 +386,23 @@ class TransactionProcessingSteps( .mapK(FutureUnlessShutdown.outcomeK) } yield { val batchSize = batch.toProtoVersioned.serializedSize + val numRecipients = batch.allRecipients.size + val numEnvelopes = batch.envelopesCount + tracker + .findHandle( + submitterInfoWithDedupPeriod.commandId, + submitterInfoWithDedupPeriod.applicationId, + submitterInfoWithDedupPeriod.actAs, + submitterInfoWithDedupPeriod.submissionId, + ) + .recordEnvelopeSizes(batchSize, numRecipients, numEnvelopes) + metrics.protocolMessages.confirmationRequestSize.update(batchSize)(MetricsContext.Empty) new PreparedTransactionBatch( batch, request.rootHash, - submitterInfoWithDedupPeriod.toCompletionInfo(), + submitterInfoWithDedupPeriod.toCompletionInfo, ): PreparedBatch } @@ -397,7 +410,7 @@ class TransactionProcessingSteps( rejectionCause: TransactionSubmissionTrackingData.RejectionCause ): Success[Outcome[Either[SubmissionTrackingData, PreparedBatch]]] = { val trackingData = TransactionSubmissionTrackingData( - submitterInfoWithDedupPeriod.toCompletionInfo(), + submitterInfoWithDedupPeriod.toCompletionInfo, rejectionCause, domainId, protocolVersion, @@ -428,7 +441,7 @@ class TransactionProcessingSteps( override def submissionTimeoutTrackingData: SubmissionTrackingData = TransactionSubmissionTrackingData( - submitterInfo.toCompletionInfo().copy(optDeduplicationPeriod = None), + submitterInfo.toCompletionInfo.copy(optDeduplicationPeriod = None), TransactionSubmissionTrackingData.TimeoutCause, domainId, protocolVersion, @@ -1123,7 +1136,6 @@ class TransactionProcessingSteps( meta.commandId.unwrap, 
Some(meta.dedupPeriod), meta.submissionId, - statistics = None, // Statistics filled by ReadService, so we don't persist them ) Option.when(freshOwnTimelyTx)(completionInfo) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala index 468ad7539..ef7605dba 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessor.scala @@ -37,6 +37,7 @@ import com.digitalasset.canton.participant.protocol.validation.{ import com.digitalasset.canton.participant.store.SyncDomainEphemeralState import com.digitalasset.canton.participant.util.DAMLe import com.digitalasset.canton.participant.util.DAMLe.PackageResolver +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes import com.digitalasset.canton.protocol.* import com.digitalasset.canton.sequencing.client.{SendAsyncClientError, SequencerClient} @@ -60,6 +61,7 @@ class TransactionProcessor( sequencerClient: SequencerClient, inFlightSubmissionTracker: InFlightSubmissionTracker, ephemeral: SyncDomainEphemeralState, + commandProgressTracker: CommandProgressTracker, metrics: TransactionProcessingMetrics, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, @@ -102,6 +104,7 @@ class TransactionProcessor( staticDomainParameters.protocolVersion, loggerFactory, ), + commandProgressTracker, loggerFactory, futureSupervisor, ), diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala index 6e229f89a..12511613e 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala @@ -658,7 +658,6 @@ private[transfer] class TransferInProcessingSteps( commandId = submitterMetadata.commandId, optDeduplicationPeriod = None, submissionId = submitterMetadata.submissionId, - statistics = None, ) ) } yield LedgerSyncEvent.TransferredIn( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala index 4713d2964..3ace78bdc 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala @@ -676,7 +676,6 @@ class TransferOutProcessingSteps( commandId = submitterMetadata.commandId, optDeduplicationPeriod = None, submissionId = submitterMetadata.submissionId, - statistics = None, ) ) } yield LedgerSyncEvent.TransferredOut( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferProcessingSteps.scala 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferProcessingSteps.scala index b2df4209f..7f13cf231 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferProcessingSteps.scala @@ -278,7 +278,6 @@ trait TransferProcessingSteps[ commandId = submitterMetadata.commandId, optDeduplicationPeriod = None, submissionId = None, - statistics = None, ) val tse = Option.when(isSubmittingParticipant)( @@ -308,7 +307,6 @@ trait TransferProcessingSteps[ commandId = pendingTransfer.submitterMetadata.commandId, optDeduplicationPeriod = None, submissionId = pendingTransfer.submitterMetadata.submissionId, - statistics = None, ) ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala index 5418d0e5e..bd9494111 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala @@ -112,7 +112,7 @@ class RecipientsValidator[I]( ) (wrongRecipients, goodInputs, errors) = resultsWithSequencingSnapshot - actualWrongRecupients <- { + actualWrongRecipients <- { if (errors.isEmpty) { // The recipients check reported no error. Future.successful(wrongRecipients) @@ -150,7 +150,7 @@ class RecipientsValidator[I]( } } } yield { - (actualWrongRecupients, goodInputs) + (actualWrongRecipients, goodInputs) } } @@ -163,7 +163,7 @@ class RecipientsValidator[I]( ): Future[(Seq[WrongRecipients], Seq[I], RecipientsValidatorErrors)] = { // Used to accumulate all the errors to report later. - // Each error also has an associated flag indicating whether it may be due to a topology change. + // Each error also has an associated flag indicating whether it may be due to a topology change. 
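+    // (Accumulating into a builder lets a single pass over the inputs report every
+    // wrong recipient at once, instead of failing fast on the first error.)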
val errorBuilder = Seq.newBuilder[Error] val rootHashes = inputs.map(viewOfInput(_).rootHash).distinct diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/MultiDomainEventLog.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/MultiDomainEventLog.scala index ae51b704a..462e7965b 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/MultiDomainEventLog.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/MultiDomainEventLog.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.participant.store import cats.data.{EitherT, OptionT} import cats.syntax.option.* import cats.syntax.parallel.* +import com.digitalasset.canton.LedgerSubmissionId import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveLong} @@ -51,7 +52,6 @@ import com.digitalasset.canton.tracing.{HasTraceContext, TraceContext, Traced} import com.digitalasset.canton.util.EitherTUtil import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.{LedgerSubmissionId, LedgerTransactionId} import org.apache.pekko.NotUsed import org.apache.pekko.stream.Materializer import org.apache.pekko.stream.scaladsl.Source @@ -140,11 +140,6 @@ trait MultiDomainEventLog extends AutoCloseable { this: NamedLogging => traceContext: TraceContext ): Future[Map[EventId, (GlobalOffset, TimestampedEvent, CantonTimestamp)]] - /** Find the domain of a committed transaction. */ - def lookupTransactionDomain(transactionId: LedgerTransactionId)(implicit - traceContext: TraceContext - ): OptionT[Future, DomainId] - /** Yields the greatest local offsets for the underlying [[SingleDimensionEventLog]] with global offset less than * or equal to `upToInclusive`. 
* diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SerializableLedgerSyncEvent.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SerializableLedgerSyncEvent.scala index 601f9f7ad..1a70ae4ff 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SerializableLedgerSyncEvent.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SerializableLedgerSyncEvent.scala @@ -610,13 +610,8 @@ final case class SerializableCompletionInfo(completionInfo: CompletionInfo) { commandId, deduplicateUntil, submissionId, - statistics, ) = completionInfo - require( - statistics.isEmpty, - "Statistics are only set before emitting CompletionInfo in CantonSyncService", - ) v30.CompletionInfo( actAs, applicationId, @@ -645,7 +640,6 @@ object SerializableCompletionInfo { commandId, deduplicateUntil, submissionId, - statistics = None, ) } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala index a2a2e251f..40a77cc98 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbMultiDomainEventLog.scala @@ -10,7 +10,6 @@ import cats.syntax.parallel.* import com.daml.metrics.api.MetricsContext import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.LedgerTransactionId import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong, PositiveInt} import com.digitalasset.canton.data.CantonTimestamp @@ -31,7 +30,6 @@ import com.digitalasset.canton.participant.store.MultiDomainEventLog.{ } import com.digitalasset.canton.participant.store.db.DbMultiDomainEventLog.* import com.digitalasset.canton.participant.store.{EventLogId, MultiDomainEventLog, TransferStore} -import com.digitalasset.canton.participant.sync.TimestampedEvent.TransactionEventId import com.digitalasset.canton.participant.sync.{LedgerSyncEvent, TimestampedEvent} import com.digitalasset.canton.participant.{GlobalOffset, LocalOffset, RequestOffset} import com.digitalasset.canton.pekkostreams.dispatcher.Dispatcher @@ -43,10 +41,9 @@ import com.digitalasset.canton.resource.DbStorage.Implicits.{ getResultPackageId as _, } import com.digitalasset.canton.resource.DbStorage.Profile +import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.store.db.DbDeserializationException -import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore} import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ShowUtil.* @@ -519,22 +516,6 @@ class DbMultiDomainEventLog private[db] ( case _ => Future.successful(Map.empty) } - override def lookupTransactionDomain(transactionId: LedgerTransactionId)(implicit - traceContext: TraceContext - ): OptionT[Future, DomainId] = { - storage - .querySingle( - sql"""select log_id from par_event_log where event_id = ${TransactionEventId( - transactionId - )}""" - .as[Int] - .headOption, - functionFullName, - ) - 
.flatMap(idx => IndexedDomain.fromDbIndexOT("event_log", indexedStringStore)(idx)) - .map(_.domainId) - } - private def lastLocalOffsetBeforeOrAt[T <: LocalOffset]( eventLogId: EventLogId, upToInclusive: Option[GlobalOffset], diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryMultiDomainEventLog.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryMultiDomainEventLog.scala index d3e1ba39f..79e4cae16 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryMultiDomainEventLog.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryMultiDomainEventLog.scala @@ -7,7 +7,6 @@ import cats.data.OptionT import cats.syntax.foldable.* import cats.syntax.parallel.* import com.daml.metrics.api.MetricsContext -import com.digitalasset.canton.LedgerTransactionId import com.digitalasset.canton.concurrent.{DirectExecutionContext, FutureSupervisor} import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, NonNegativeLong} @@ -31,10 +30,6 @@ import com.digitalasset.canton.participant.event.RecordOrderPublisher.{ PendingPublish, } import com.digitalasset.canton.participant.metrics.ParticipantMetrics -import com.digitalasset.canton.participant.store.EventLogId.{ - DomainEventLogId, - ParticipantEventLogId, -} import com.digitalasset.canton.participant.store.MultiDomainEventLog.* import com.digitalasset.canton.participant.store.{ EventLogId, @@ -43,7 +38,7 @@ import com.digitalasset.canton.participant.store.{ SingleDimensionEventLog, TransferStore, } -import com.digitalasset.canton.participant.sync.TimestampedEvent.{EventId, TransactionEventId} +import com.digitalasset.canton.participant.sync.TimestampedEvent.EventId import com.digitalasset.canton.participant.sync.{ LedgerSyncEvent, SyncDomainPersistentStateLookup, @@ -60,7 +55,6 @@ import com.digitalasset.canton.pekkostreams.dispatcher.SubSource.RangeSource import com.digitalasset.canton.protocol.TargetDomainId import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ShowUtil.* @@ -377,14 +371,6 @@ class InMemoryMultiDomainEventLog( .map(_.toMap) } - override def lookupTransactionDomain( - transactionId: LedgerTransactionId - )(implicit traceContext: TraceContext): OptionT[Future, DomainId] = - byEventId(namedLoggingContext)(TransactionEventId(transactionId)).subflatMap { - case (DomainEventLogId(id), _localOffset) => Some(id.item) - case (ParticipantEventLogId(_), _localOffset) => None - } - override def lastLocalOffsetBeforeOrAt( eventLogId: EventLogId, upToInclusive: Option[GlobalOffset] = None, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala index 6f6dcfbc8..30efdd523 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala @@ -36,7 +36,7 @@ import com.digitalasset.canton.ledger.api.health.HealthStatus import 
com.digitalasset.canton.ledger.error.CommonErrors import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.ledger.participant.state -import com.digitalasset.canton.ledger.participant.state.ReadService.ConnectedDomainResponse +import com.digitalasset.canton.ledger.participant.state.WriteService.ConnectedDomainResponse import com.digitalasset.canton.ledger.participant.state.* import com.digitalasset.canton.lifecycle.{ FlagCloseable, @@ -56,7 +56,6 @@ import com.digitalasset.canton.participant.admin.inspection.{ SyncStateInspection, } import com.digitalasset.canton.participant.admin.repair.RepairService -import com.digitalasset.canton.participant.admin.workflows.java.canton import com.digitalasset.canton.participant.domain.* import com.digitalasset.canton.participant.event.RecordOrderPublisher import com.digitalasset.canton.participant.metrics.ParticipantMetrics @@ -90,7 +89,10 @@ import com.digitalasset.canton.participant.sync.SyncServiceError.{ import com.digitalasset.canton.participant.topology.* import com.digitalasset.canton.participant.topology.client.MissingKeysAlerter import com.digitalasset.canton.participant.util.DAMLe -import com.digitalasset.canton.platform.apiserver.execution.AuthorityResolver +import com.digitalasset.canton.platform.apiserver.execution.{ + AuthorityResolver, + CommandProgressTracker, +} import com.digitalasset.canton.platform.store.packagemeta.PackageMetadata import com.digitalasset.canton.protocol.* import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException @@ -188,37 +190,6 @@ class CantonSyncService( participantNodePersistentState.value.settingsStore.settings.maxDeduplicationDuration .getOrElse(throw new RuntimeException("Max deduplication duration is not available")) - // Augment event with transaction statistics "as late as possible" as stats are redundant data and so that - // we don't need to persist stats and deal with versioning stats changes. Also every event is usually consumed - // only once. 
- private[sync] def augmentTransactionStatistics( - e: LedgerSyncEvent - ): LedgerSyncEvent = e match { - case e: LedgerSyncEvent.TransactionAccepted => - e.copy(completionInfoO = - e.completionInfoO.map(completionInfo => - completionInfo.copy(statistics = - Some(LedgerTransactionNodeStatistics(e.transaction, excludedPackageIds)) - ) - ) - ) - case e => e - } - - private val excludedPackageIds: Set[LfPackageId] = - if (parameters.excludeInfrastructureTransactions) { - Set( - canton.internal.ping.Ping.TEMPLATE_ID, - canton.internal.bong.BongProposal.TEMPLATE_ID, - canton.internal.bong.Bong.TEMPLATE_ID, - canton.internal.bong.Merge.TEMPLATE_ID, - canton.internal.bong.Explode.TEMPLATE_ID, - canton.internal.bong.Collapse.TEMPLATE_ID, - ).map(x => LfPackageId.assertFromString(x.getPackageId)) - } else { - Set.empty[LfPackageId] - } - private type ConnectionListener = Traced[DomainId] => Unit // Listeners to domain connections @@ -308,6 +279,10 @@ class CantonSyncService( syncDomainPersistentStateManager.get(domainId.unwrap).map(_.transferStore), protocolVersionFor = protocolVersionGetter, ) + val commandProgressTracker: CommandProgressTracker = + if (parameters.commandProgressTracking.enabled) + new CommandProgressTrackerImpl(parameters.commandProgressTracking, clock, loggerFactory) + else CommandProgressTracker.NoOp private val commandDeduplicator = new CommandDeduplicatorImpl( participantNodePersistentState.map(_.commandDeduplicationStore), @@ -406,6 +381,19 @@ class CantonSyncService( loggerFactory, ) + private def trackSubmission( + submitterInfo: SubmitterInfo, + transaction: LfSubmittedTransaction, + ): Unit = + commandProgressTracker + .findHandle( + submitterInfo.commandId, + submitterInfo.applicationId, + submitterInfo.actAs, + submitterInfo.submissionId, + ) + .recordTransactionImpact(transaction) + // Submit a transaction (write service implementation) override def submitTransaction( submitterInfo: SubmitterInfo, @@ -422,6 +410,7 @@ class CantonSyncService( withSpan("CantonSyncService.submitTransaction") { implicit traceContext => span => span.setAttribute("command_id", submitterInfo.commandId) logger.debug(s"Received submit-transaction ${submitterInfo.commandId} from ledger-api server") + trackSubmission(submitterInfo, transaction) submitTransactionF( submitterInfo, optDomainId, @@ -637,7 +626,6 @@ class CantonSyncService( .subscribe(beginStartingAt) .mapConcat { case (offset, event) => event - .map(augmentTransactionStatistics) .traverse(_.toDamlUpdate) .map { e => logger.debug(show"Emitting event at offset $offset. 
Event: ${event.value}")( @@ -1405,6 +1393,7 @@ class CantonSyncService( missingKeysAlerter, transferCoordination, inFlightSubmissionTracker, + commandProgressTracker, clock, domainMetrics, futureSupervisor, @@ -1755,8 +1744,8 @@ class CantonSyncService( } override def getConnectedDomains( - request: ReadService.ConnectedDomainRequest - )(implicit traceContext: TraceContext): Future[ReadService.ConnectedDomainResponse] = { + request: WriteService.ConnectedDomainRequest + )(implicit traceContext: TraceContext): Future[WriteService.ConnectedDomainResponse] = { def getSnapshot(domainAlias: DomainAlias, domainId: DomainId): Future[TopologySnapshot] = syncCrypto.ips .forDomain(domainId) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CommandProgressTrackerImpl.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CommandProgressTrackerImpl.scala new file mode 100644 index 000000000..fd53d1dd9 --- /dev/null +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CommandProgressTrackerImpl.scala @@ -0,0 +1,353 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.sync + +import com.daml.ledger.api.v2.admin.command_inspection_service.CommandState +import com.daml.ledger.api.v2.admin.command_inspection_service.GetCommandStatusResponse.CommandStatus.{ + CommandUpdates, + RequestStatistics, +} +import com.daml.ledger.api.v2.commands.Command +import com.daml.ledger.api.v2.value.Identifier +import com.daml.lf.data.Ref.TypeConName +import com.daml.lf.transaction.Node.LeafOnlyAction +import com.daml.lf.transaction.Transaction.ChildrenRecursion +import com.daml.lf.transaction.{GlobalKeyWithMaintainers, Node} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.discard.Implicits.DiscardOps +import com.digitalasset.canton.ledger.api.util.LfEngineToApi +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.sync.CommandProgressTrackerConfig.{ + defaultMaxFailed, + defaultMaxPending, + defaultMaxSucceeded, +} +import com.digitalasset.canton.platform.apiserver.execution.{ + CommandProgressTracker, + CommandResultHandle, + CommandStatus, +} +import com.digitalasset.canton.platform.store.CompletionFromTransaction +import com.digitalasset.canton.platform.store.interfaces.TransactionLogUpdate +import com.digitalasset.canton.protocol.LfSubmittedTransaction +import com.digitalasset.canton.time.Clock +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import io.grpc.StatusRuntimeException +import monocle.macros.syntax.lens.* + +import java.util.concurrent.atomic.AtomicReference +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, Future} + +final case class CommandProgressTrackerConfig( + enabled: Boolean = true, + maxFailed: NonNegativeInt = defaultMaxFailed, + maxPending: NonNegativeInt = defaultMaxPending, + maxSucceeded: NonNegativeInt = defaultMaxSucceeded, +) + +object CommandProgressTrackerConfig { + lazy val defaultMaxFailed: NonNegativeInt = NonNegativeInt.tryCreate(100) + lazy val defaultMaxPending: NonNegativeInt = NonNegativeInt.tryCreate(1000) + lazy val defaultMaxSucceeded: NonNegativeInt = NonNegativeInt.tryCreate(100) +} + +@SuppressWarnings(Array("com.digitalasset.canton.RequireBlocking")) +class CommandProgressTrackerImpl( 
+ config: CommandProgressTrackerConfig, + clock: Clock, + val loggerFactory: NamedLoggerFactory, +)(implicit executionContext: ExecutionContext) + extends CommandProgressTracker + with NamedLogging { + + private case class MyCommandResultHandle(key: CommandKey, initial: CommandStatus) + extends CommandResultHandle { + val ref = new AtomicReference[CommandStatus](initial) + private def updateWithStatus( + err: com.google.rpc.status.Status, + state: CommandState, + ): Unit = + ref.updateAndGet { x => + x + .focus(_.completion.status) + .replace(Some(err)) + .copy( + state = state, + completed = Some(clock.now), + ) + }.discard + + private def processSyncErr(err: com.google.rpc.status.Status): Unit = { + // remove from pending + lock.synchronized(pending.remove(key)).foreach { cur => + if (config.maxFailed.value > 0) { + updateWithStatus(err, CommandState.COMMAND_STATE_FAILED) + addToCollection(cur.ref.get(), failed, config.maxFailed.value) + } + } + } + + def failedSync(err: StatusRuntimeException): Unit = { + val tmp = + com.google.rpc.status.Status.fromJavaProto(io.grpc.protobuf.StatusProto.fromThrowable(err)) + processSyncErr(tmp) + } + def internalErrorSync(err: Throwable): Unit = + processSyncErr(encodeInternalError(err)) + private def encodeInternalError(err: Throwable): com.google.rpc.status.Status = + com.google.rpc.status.Status + .of(com.google.rpc.Code.INTERNAL_VALUE, err.getMessage, Seq.empty) + + def failedAsync( + status: Option[com.google.rpc.status.Status] + ): Unit = + updateWithStatus( + status.getOrElse( + encodeInternalError(new IllegalStateException("Missing status upon failed completion")) + ), + CommandState.COMMAND_STATE_FAILED, + ) + + def succeeded(): Unit = + updateWithStatus(CompletionFromTransaction.OkStatus, CommandState.COMMAND_STATE_SUCCEEDED) + + def recordEnvelopeSizes(requestSize: Int, numRecipients: Int, numEnvelopes: Int): Unit = + ref + .updateAndGet( + _.copy( + requestStatistics = RequestStatistics( + requestSize = requestSize, + recipients = numRecipients, + envelopes = numEnvelopes, + ) + ) + ) + .discard + + def recordTransactionImpact( + transaction: LfSubmittedTransaction + ): Unit = { + val creates = mutable.ListBuffer.empty[CommandUpdates.Contract] + val archives = mutable.ListBuffer.empty[CommandUpdates.Contract] + final case class Stats( + exercised: Int = 0, + fetched: Int = 0, + lookedUpByKey: Int = 0, + ) + def mk( + templateId: TypeConName, + coid: String, + keyOpt: Option[GlobalKeyWithMaintainers], + ): CommandUpdates.Contract = { + CommandUpdates.Contract( + templateId = Some( + Identifier( + templateId.packageId, + templateId.qualifiedName.module.toString, + templateId.qualifiedName.name.toString, + ) + ), + contractId = coid, + contractKey = + keyOpt.flatMap(x => LfEngineToApi.lfValueToApiValue(verbose = false, x.value).toOption), + ) + } + def leaf(leafOnlyAction: LeafOnlyAction, stats: Stats): Stats = + leafOnlyAction match { + case c: Node.Create => + creates += mk(c.templateId, c.coid.coid, c.keyOpt) + stats + case _: Node.Fetch => stats.copy(fetched = stats.fetched + 1) + case _: Node.LookupByKey => stats.copy(lookedUpByKey = stats.lookedUpByKey + 1) + } + val stats = transaction.foldInExecutionOrder(Stats())( + exerciseBegin = (acc, _, exerciseNode) => { + if (exerciseNode.consuming) { + archives += mk( + exerciseNode.templateId, + exerciseNode.targetCoid.coid, + exerciseNode.keyOpt, + ) + } + (acc.copy(exercised = acc.exercised + 1), ChildrenRecursion.DoRecurse) + }, + rollbackBegin = (acc, _, _) => { + (acc, 
ChildrenRecursion.DoNotRecurse) + }, + leaf = (acc, _, leafNode) => leaf(leafNode, acc), + exerciseEnd = (acc, _, _) => acc, + rollbackEnd = (acc, _, _) => acc, + ) + ref + .updateAndGet( + _.copy( + updates = CommandUpdates( + created = creates.toList, + archived = archives.toList, + exercised = stats.exercised, + fetched = stats.fetched, + lookedUpByKey = stats.lookedUpByKey, + ) + ) + ) + .discard + } + + } + + // command key is (commandId, applicationId, actAs, submissionId) + private type CommandKey = (String, String, Set[String], Option[String]) + + private val pending = new mutable.LinkedHashMap[CommandKey, MyCommandResultHandle]() + private val failed = new mutable.ArrayDeque[CommandStatus](config.maxFailed.value) + private val succeeded = new mutable.ArrayDeque[CommandStatus](config.maxSucceeded.value) + private val lock = new Object() + + private def findCommands( + commandIdPrefix: String, + limit: Int, + collection: => Iterable[CommandStatus], + ): Seq[CommandStatus] = { + lock.synchronized { + collection.filter(_.completion.commandId.startsWith(commandIdPrefix)).take(limit).toSeq + } + } + + override def findCommandStatus( + commandIdPrefix: String, + state: CommandState, + limit: Int, + ): Future[Seq[CommandStatus]] = Future { + val pool = state match { + case CommandState.COMMAND_STATE_UNSPECIFIED | CommandState.Unrecognized(_) => + findCommands(commandIdPrefix, limit, failed) ++ + findCommands(commandIdPrefix, limit, pending.values.map(_.ref.get())) ++ + findCommands(commandIdPrefix, limit, succeeded) + case CommandState.COMMAND_STATE_FAILED => findCommands(commandIdPrefix, limit, failed) + case CommandState.COMMAND_STATE_PENDING => + findCommands(commandIdPrefix, limit, pending.values.map(_.ref.get())) + case CommandState.COMMAND_STATE_SUCCEEDED => findCommands(commandIdPrefix, limit, succeeded) + } + pool.take(limit) + } + + override def registerCommand( + commandId: String, + submissionId: Option[String], + applicationId: String, + commands: Seq[Command], + actAs: Set[String], + )(implicit traceContext: TraceContext): CommandResultHandle = if ( + pending.size >= config.maxPending.value + ) { + CommandResultHandle.NoOp + } else { + val key = (commandId, applicationId, actAs, submissionId) + logger.debug(s"Registering handle for $key") + val status = CommandStatus( + started = clock.now, + completed = None, + completion = CompletionFromTransaction.toApiCompletion( + commandId = commandId, + transactionId = "", + applicationId = applicationId, + traceContext = traceContext, + optStatus = None, + optSubmissionId = submissionId, + optDeduplicationOffset = None, + optDeduplicationDurationSeconds = None, + optDeduplicationDurationNanos = None, + ), + state = CommandState.COMMAND_STATE_PENDING, + commands = commands, + requestStatistics = RequestStatistics(), + updates = CommandUpdates(), + ) + val handle = MyCommandResultHandle(key, status) + val existing = lock.synchronized { + pending.put(key, handle) + } + existing.foreach { prev => + // in theory, this can happen if an app sends the same command twice, so it's not + // a warning ... 
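+      // (the put above has already replaced the previous handle in `pending`, so the
+      // stale status is only logged and the new submission keeps being tracked)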
+ logger.info(s"Duplicate command registration for ${prev.ref.get()}") + } + handle + } + + override def findHandle( + commandId: String, + applicationId: String, + actAs: Seq[String], + submissionId: Option[String], + ): CommandResultHandle = { + lock.synchronized { + pending.getOrElse( + (commandId, applicationId, actAs.toSet, submissionId), + CommandResultHandle.NoOp, + ) + } + } + + private def addToCollection( + commandStatus: CommandStatus, + collection: mutable.ArrayDeque[CommandStatus], + maxSize: Int, + ): Unit = { + lock.synchronized { + collection.prepend(commandStatus) + if (collection.size > maxSize) { + collection.removeLast().discard + } + } + } + + override def processLedgerUpdate(update: Traced[TransactionLogUpdate]): Unit = + update.value match { + case TransactionLogUpdate.TransactionRejected(_offset, completionDetails) => + completionDetails.completionStreamResponse.completion.foreach { completionInfo => + val key = ( + completionInfo.commandId, + completionInfo.applicationId, + completionDetails.submitters, + Option.when(completionInfo.submissionId.nonEmpty)(completionInfo.submissionId), + ) + // remove from pending + lock.synchronized(pending.remove(key)).foreach { cur => + if (config.maxFailed.value > 0) { + cur.failedAsync(completionInfo.status) + addToCollection(cur.ref.get(), failed, config.maxFailed.value) + } + } + } + case TransactionLogUpdate.TransactionAccepted( + _transactionId, + commandId, + _workflowId, + _effectiveAt, + _offset, + _events, + Some(completionDetails), + _domainId, + _recordTime, + ) => + completionDetails.completionStreamResponse.completion.foreach { completionInfo => + val key = ( + commandId, + completionInfo.applicationId, + completionDetails.submitters, + Option.when(completionInfo.submissionId.nonEmpty)(completionInfo.submissionId), + ) + // remove from pending + lock.synchronized(pending.remove(key)).foreach { cur => + // mark as done + cur.succeeded() + addToCollection(cur.ref.get(), succeeded, config.maxSucceeded.value) + } + } + case _ => + } + +} diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala index 37e8fae2c..4eba6a2b7 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala @@ -64,7 +64,10 @@ import com.digitalasset.canton.participant.topology.client.MissingKeysAlerter import com.digitalasset.canton.participant.traffic.ParticipantTrafficControlSubscriber import com.digitalasset.canton.participant.util.DAMLe.PackageResolver import com.digitalasset.canton.participant.util.{DAMLe, TimeOfChange} -import com.digitalasset.canton.platform.apiserver.execution.AuthorityResolver +import com.digitalasset.canton.platform.apiserver.execution.{ + AuthorityResolver, + CommandProgressTracker, +} import com.digitalasset.canton.protocol.WellFormedTransaction.WithoutSuffixes import com.digitalasset.canton.protocol.* import com.digitalasset.canton.sequencing.* @@ -120,6 +123,7 @@ class SyncDomain( missingKeysAlerter: MissingKeysAlerter, transferCoordination: TransferCoordination, inFlightSubmissionTracker: InFlightSubmissionTracker, + commandProgressTracker: CommandProgressTracker, messageDispatcherFactory: MessageDispatcher.Factory[MessageDispatcher], clock: Clock, metrics: SyncDomainMetrics, @@ -186,6 +190,7 @@ class SyncDomain( 
sequencerClient, inFlightSubmissionTracker, ephemeral, + commandProgressTracker, metrics.transactionProcessing, timeouts, loggerFactory, @@ -1003,6 +1008,7 @@ object SyncDomain { missingKeysAlerter: MissingKeysAlerter, transferCoordination: TransferCoordination, inFlightSubmissionTracker: InFlightSubmissionTracker, + commandProgressTracker: CommandProgressTracker, clock: Clock, syncDomainMetrics: SyncDomainMetrics, futureSupervisor: FutureSupervisor, @@ -1029,6 +1035,7 @@ object SyncDomain { missingKeysAlerter: MissingKeysAlerter, transferCoordination: TransferCoordination, inFlightSubmissionTracker: InFlightSubmissionTracker, + commandProgressTracker: CommandProgressTracker, clock: Clock, syncDomainMetrics: SyncDomainMetrics, futureSupervisor: FutureSupervisor, @@ -1052,6 +1059,7 @@ object SyncDomain { missingKeysAlerter, transferCoordination, inFlightSubmissionTracker, + commandProgressTracker, MessageDispatcher.DefaultFactory, clock, syncDomainMetrics, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala index 5cba3d46a..2b2e90778 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala @@ -144,7 +144,7 @@ class ParticipantTopologyDispatcher( domains.values.toList .flatMap(_.forgetNE) .collect { case outbox: StoreBasedDomainOutbox => outbox } - .parTraverse(_.newTransactionsAddedToAuthorizedStore(timestamp, num)) + .parTraverse(_.newTransactionsAdded(timestamp, num)) .map(_ => ()) } }) @@ -304,7 +304,7 @@ class ParticipantTopologyDispatcher( timestamp: CantonTimestamp, transactions: Seq[SignedTopologyTransaction[TopologyChangeOp, TopologyMapping]], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = - queueBasedDomainOutbox.newTransactionsAddedToAuthorizedStore( + queueBasedDomainOutbox.newTransactionsAdded( timestamp, transactions.size, ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala index 072a71103..0545bfe46 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/util/DAMLe.scala @@ -11,7 +11,8 @@ import com.daml.lf.data.{ImmArray, Ref, Time} import com.daml.lf.engine.* import com.daml.lf.interpretation.Error as LfInterpretationError import com.daml.lf.language.Ast.Package -import com.daml.lf.language.{LanguageMajorVersion, LanguageVersion} +import com.daml.lf.language.LanguageVersion +import com.daml.lf.language.LanguageVersion.v2_dev import com.daml.lf.transaction.{ContractKeyUniquenessMode, TransactionVersion, Versioned} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -41,6 +42,7 @@ import scala.util.{Failure, Success} object DAMLe { def newEngine( enableLfDev: Boolean, + enableLfBeta: Boolean, enableStackTraces: Boolean, profileDir: Option[Path] = None, iterationsBetweenInterruptions: Long = @@ -48,14 +50,10 @@ object DAMLe { ): Engine = new Engine( EngineConfig( - allowedLanguageVersions = - if (enableLfDev) - 
LanguageVersion.AllVersions(LanguageMajorVersion.V2) - else - VersionRange( - LanguageVersion.v2_1, - LanguageVersion.StableVersions(LanguageMajorVersion.V2).max, - ), + allowedLanguageVersions = VersionRange( + LanguageVersion.v2_1, + maxVersion(enableLfDev, enableLfBeta), + ), // The package store contains only validated packages, so we can skip validation upon loading packageValidation = false, stackTraceMode = enableStackTraces, @@ -66,6 +64,11 @@ object DAMLe { ) ) + private def maxVersion(enableLfDev: Boolean, enableLfBeta: Boolean) = + if (enableLfDev) v2_dev + else if (enableLfBeta) LanguageVersion.EarlyAccessVersions(LanguageVersion.Major.V2).max + else LanguageVersion.StableVersions(LanguageVersion.Major.V2).max + /** Resolves packages by [[com.daml.lf.data.Ref.PackageId]]. * The returned packages must have been validated * so that [[com.daml.lf.engine.Engine]] can skip validation. diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/DefaultParticipantStateValues.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/DefaultParticipantStateValues.scala index 36f78e74a..bec0f0e6e 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/DefaultParticipantStateValues.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/DefaultParticipantStateValues.scala @@ -43,7 +43,6 @@ object DefaultParticipantStateValues { commandId: CommandId = DefaultDamlValues.commandId(), optDeduplicationPeriod: Option[DeduplicationPeriod] = Some(deduplicationDuration), submissionId: Option[Ref.SubmissionId] = DefaultDamlValues.submissionId().some, - statistics: Option[LedgerTransactionNodeStatistics] = None, ): CompletionInfo = CompletionInfo( actAs, @@ -51,7 +50,6 @@ object DefaultParticipantStateValues { commandId.unwrap, optDeduplicationPeriod, submissionId, - statistics, ) def transactionMeta( diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/GrpcTrafficControlServiceTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/GrpcTrafficControlServiceTest.scala index da1f796eb..dd01b4190 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/GrpcTrafficControlServiceTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/GrpcTrafficControlServiceTest.scala @@ -42,6 +42,7 @@ class GrpcTrafficControlServiceTest extraTrafficPurchased = NonNegativeLong.tryCreate(5), extraTrafficConsumed = NonNegativeLong.tryCreate(6), baseTrafficRemainder = NonNegativeLong.tryCreate(7), + lastConsumedCost = NonNegativeLong.tryCreate(8), timestamp = CantonTimestamp.now(), serial = Some(PositiveInt.one), ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala index 895df274e..1def373a6 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageServiceTest.scala @@ -79,7 +79,7 @@ class PackageServiceTest val packageDependencyResolver = new PackageDependencyResolver(packageStore, processingTimeouts, loggerFactory) private val engine = - DAMLe.newEngine(enableLfDev = false, enableStackTraces = false) + DAMLe.newEngine(enableLfDev = false, 
enableLfBeta = false, enableStackTraces = false) val sut: PackageService = PackageService .createAndInitialize( diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala index 82f202f5d..334d1a4a3 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala @@ -85,6 +85,8 @@ trait MessageDispatcherTest { private val otherParticipant = ParticipantId.tryFromProtoPrimitive("PAR::other::participant") private val mediatorGroup = MediatorGroupRecipient(MediatorGroupIndex.zero) private val mediatorGroup2 = MediatorGroupRecipient(MediatorGroupIndex.one) + private val partyId = PartyId.tryFromProtoPrimitive("party::default") + private val otherPartyId = PartyId.tryFromProtoPrimitive("party::other") private val encryptedRandomnessTest = Encrypted.fromByteString[SecureRandomness](ByteString.EMPTY) @@ -806,6 +808,45 @@ trait MessageDispatcherTest { error.getMessage should include(show"No processor for view type $UnknownTestViewType") } + "ignore protocol messages for foreign domains" in { + val sut = mk() + val sc = SequencerCounter(1) + val ts = CantonTimestamp.ofEpochSecond(1) + val txForeignDomain = TopologyTransactionsBroadcast.create( + DomainId.tryFromString("foo::bar"), + Seq( + Broadcast( + String255.tryCreate("some request"), + List(factory.ns1k1_k1), + ) + ), + testedProtocolVersion, + ) + val event = + mkDeliver( + Batch.of(testedProtocolVersion, txForeignDomain -> Recipients.cc(participantId)), + sc, + ts, + ) + + loggerFactory.assertLoggedWarningsAndErrorsSeq( + handle(sut, event) { + verify(sut.topologyProcessor).apply( + isEq(sc), + isEq(SequencedTime(ts)), + isEq(Traced(List.empty)), + ) + + checkTicks(sut, sc, ts) + }.futureValue, + logEntries => { + logEntries should not be empty + forEvery(logEntries) { + _.warningMessage should include("Received messages with wrong domain IDs") + } + }, + ) + } def request( view: EncryptedViewMessage[ViewType], @@ -1029,6 +1070,35 @@ trait MessageDispatcherTest { testTopologyTimestamp, SerializedRootHashMessagePayload.empty, ) + + def mkRootHashMessageRecipients(recipients: NonEmpty[Seq[Recipient]]): Recipients = + Recipients.recipientGroups( + recipients.map(recipient => NonEmpty(Set, recipient, mediatorGroup)) + ) + + val goodBatches = List( + Batch.of[ProtocolMessage]( + testedProtocolVersion, + view -> Recipients.cc(participantId), + rootHashMessage -> Recipients.cc(MemberRecipient(participantId), mediatorGroup), + commitment -> Recipients.cc(participantId), + ) -> Seq(), + Batch.of[ProtocolMessage]( + testedProtocolVersion, + view -> Recipients.cc(participantId), + rootHashMessage -> Recipients.cc(ParticipantsOfParty(partyId), mediatorGroup), + commitment -> Recipients.cc(participantId), + ) -> Seq(), + Batch.of[ProtocolMessage]( + testedProtocolVersion, + view -> Recipients.cc(participantId), + rootHashMessage -> mkRootHashMessageRecipients( + NonEmpty(Seq, ParticipantsOfParty(partyId), ParticipantsOfParty(otherPartyId)) + ), + commitment -> Recipients.cc(participantId), + ) -> Seq(), + ) + val badBatches = List( Batch.of[ProtocolMessage]( testedProtocolVersion, @@ -1039,10 +1109,18 @@ trait MessageDispatcherTest { Batch.of[ProtocolMessage]( testedProtocolVersion, view -> 
Recipients.cc(participantId), - rootHashMessage -> Recipients.cc(MemberRecipient(participantId), mediatorGroup), + rootHashMessage -> mkRootHashMessageRecipients( + NonEmpty( + Seq, + MemberRecipient(participantId), + ParticipantsOfParty(partyId), + ParticipantsOfParty(otherPartyId), + ) + ), commitment -> Recipients.cc(participantId), - // We used to include a DomainTopologyTransactionMessage which no longer exist in 3.x - ) -> Seq(), + ) -> Seq( + "The root hash message has more than one recipient group, not all using group addressing." + ), Batch.of[ProtocolMessage]( testedProtocolVersion, view -> Recipients.cc(participantId), @@ -1063,13 +1141,15 @@ trait MessageDispatcherTest { rootHashMessage -> Recipients .cc(MemberRecipient(participantId), MemberRecipient(otherParticipant), mediatorGroup2), ) -> Seq( - "The root hash message has an invalid recipient group." + "The root hash message has invalid recipient groups." ), ) + val batchesToTest = goodBatches ++ badBatches + // sequentially process the test cases so that the log messages don't interfere MonadUtil - .sequentialTraverse_(badBatches.zipWithIndex) { case ((batch, alarms), index) => + .sequentialTraverse_(batchesToTest.zipWithIndex) { case ((batch, alarms), index) => val initRc = RequestCounter(index) val sut = mk(initRc = initRc) val sc = SequencerCounter(index) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala index 51bac2270..bd934258e 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala @@ -415,7 +415,6 @@ class ProtocolProcessorTest changeId.commandId, None, Some(subId), - None, ), TransactionSubmissionTrackingData.TimeoutCause, domain, diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionsToEventsTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionsToEventsTest.scala index 4999894d3..8f002f0c3 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionsToEventsTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TopologyTransactionsToEventsTest.scala @@ -30,7 +30,7 @@ class TopologyTransactionsToEventsTest participants: List[ParticipantId], ): SignedTopologyTransaction[Replace, PartyToParticipant] = { - val mapping = PartyToParticipant( + val mapping = PartyToParticipant.tryCreate( partyId, None, PositiveInt.one, diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala index d4256a381..24c47b833 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingStepsTest.scala @@ -13,6 +13,7 @@ import com.digitalasset.canton.participant.protocol.TransactionProcessor.Transac import com.digitalasset.canton.participant.protocol.submission.TransactionConfirmationRequestFactory 
import com.digitalasset.canton.participant.protocol.validation.* import com.digitalasset.canton.participant.store.ContractStore +import com.digitalasset.canton.platform.apiserver.execution.CommandProgressTracker import com.digitalasset.canton.protocol.{ContractMetadata, LfContractId, SerializableContract} import com.digitalasset.canton.topology.{DomainId, ParticipantId, UniqueIdentifier} import org.scalatest.Assertion @@ -51,6 +52,7 @@ class TransactionProcessingStepsTest extends AsyncWordSpec with BaseTest { defaultStaticDomainParameters.protocolVersion, loggerFactory, ), + CommandProgressTracker.NoOp, loggerFactory = loggerFactory, FutureSupervisor.Noop, ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/DAMLeTestInstance.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/DAMLeTestInstance.scala index 35a3e84e7..7e67c0d86 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/DAMLeTestInstance.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/DAMLeTestInstance.scala @@ -37,7 +37,7 @@ object DAMLeTestInstance { )(implicit ec: ExecutionContext): DAMLe = { val pureCrypto = new SymbolicPureCrypto val engine = - DAMLe.newEngine(enableLfDev = false, enableStackTraces = false) + DAMLe.newEngine(enableLfDev = false, enableLfBeta = false, enableStackTraces = false) val timeouts = ProcessingTimeout() val packageDependencyResolver = new PackageDependencyResolver( diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala index 500a1a324..824492db5 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/CantonSyncServiceTest.scala @@ -3,23 +3,17 @@ package com.digitalasset.canton.participant.sync +import cats.Eval import cats.data.EitherT -import cats.implicits.* -import cats.{Eval, Id} -import com.daml.lf.data.{ImmArray, Ref} -import com.daml.lf.transaction.test.{TestNodeBuilder, TransactionBuilder, TreeTransactionBuilder} -import com.daml.lf.transaction.{CommittedTransaction, VersionedTransaction} -import com.daml.lf.value.Value.ValueRecord import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.CantonRequireTypes.String255 import com.digitalasset.canton.config.TestingConfigInternal import com.digitalasset.canton.crypto.SyncCryptoApiProvider -import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.ledger.participant.state.ChangeId import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.SuppressingLogger -import com.digitalasset.canton.participant.admin.workflows.java.PackageID +import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.admin.{PackageService, ResourceManagementService} import com.digitalasset.canton.participant.domain.{DomainAliasManager, DomainRegistry} import com.digitalasset.canton.participant.metrics.ParticipantTestMetrics @@ -31,7 +25,6 @@ import com.digitalasset.canton.participant.store.memory.{ InMemoryParticipantEventLog, 
InMemoryParticipantSettingsStore, } -import com.digitalasset.canton.participant.sync.LedgerSyncEvent.TransactionAccepted import com.digitalasset.canton.participant.sync.TimestampedEvent.EventId import com.digitalasset.canton.participant.topology.{ LedgerServerPartyNotifier, @@ -39,23 +32,13 @@ import com.digitalasset.canton.participant.topology.{ ParticipantTopologyManagerOps, } import com.digitalasset.canton.participant.util.DAMLe -import com.digitalasset.canton.participant.{ - DefaultParticipantStateValues, - ParticipantNodeParameters, -} import com.digitalasset.canton.resource.MemoryStorage import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.time.{NonNegativeFiniteDuration, SimClock} import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{ - BaseTest, - DefaultDamlValues, - HasExecutionContext, - LedgerSubmissionId, - LfPartyId, -} +import com.digitalasset.canton.{BaseTest, HasExecutionContext, LedgerSubmissionId, LfPartyId} import org.apache.pekko.stream.Materializer import org.mockito.ArgumentMatchers import org.scalatest.Outcome @@ -152,7 +135,7 @@ class CantonSyncServiceTest extends FixtureAnyWordSpec with BaseTest with HasExe partyNotifier, syncCrypto, pruningProcessor, - DAMLe.newEngine(enableLfDev = false, enableStackTraces = false), + DAMLe.newEngine(enableLfDev = false, enableLfBeta = false, enableStackTraces = false), syncDomainStateFactory, new SimClock(loggerFactory = loggerFactory), new ResourceManagementService.CommunityResourceManagementService( @@ -231,49 +214,5 @@ class CantonSyncServiceTest extends FixtureAnyWordSpec with BaseTest with HasExe result.futureValue } - - def stats(sync: CantonSyncService, packageId: String): Option[Int] = { - - import TransactionBuilder.Implicits.* - - val createNode = TestNodeBuilder.create( - id = TransactionBuilder.newCid, - templateId = Ref.Identifier(packageId, Ref.QualifiedName("M", "D")), - argument = ValueRecord(None, ImmArray.Empty), - signatories = Seq("Alice"), - observers = Seq.empty, - ) - - val tx: VersionedTransaction = TreeTransactionBuilder.toVersionedTransaction(createNode) - - lazy val event = TransactionAccepted( - completionInfoO = DefaultParticipantStateValues.completionInfo(List.empty).some, - transactionMeta = DefaultParticipantStateValues.transactionMeta(), - transaction = CommittedTransaction.subst[Id](tx), - transactionId = DefaultDamlValues.lfTransactionId(1), - recordTime = CantonTimestamp.Epoch.toLf, - divulgedContracts = List.empty, - blindingInfoO = None, - hostedWitnesses = Nil, - contractMetadata = Map(), - domainId = DomainId.tryFromString("da::default"), - ) - - Option(sync.augmentTransactionStatistics(event)) - .collect({ case ta: TransactionAccepted => ta }) - .flatMap(_.completionInfoO) - .flatMap(_.statistics) - .map(_.committed.actions) - - } - - "populate metering" in { f => - stats(f.sync, "packageX") shouldBe Some(1) - } - - "not include ping-pong packages in metering" in { f => - stats(f.sync, PackageID.PingPong) shouldBe Some(0) - } - } } diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala index 0ba67776f..c40ac8726 100644 --- 
a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala @@ -84,7 +84,7 @@ final class LedgerServerPartyNotifierTest extends AsyncWordSpec with BaseTest { participantId: ParticipantId, ): Future[Unit] = simulateTransaction( - PartyToParticipant( + PartyToParticipant.tryCreate( partyId, None, PositiveInt.one, @@ -139,7 +139,7 @@ final class LedgerServerPartyNotifierTest extends AsyncWordSpec with BaseTest { "add admin parties" in Fixture { fixture => for { _ <- fixture.simulateTransaction( - PartyToParticipant( + PartyToParticipant.tryCreate( participant1.adminParty, Some(domainId), PositiveInt.one, diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedDomainOutboxTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedDomainOutboxTest.scala index 9b5208d83..a8116389e 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedDomainOutboxTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/QueueBasedDomainOutboxTest.scala @@ -278,7 +278,7 @@ class QueueBasedDomainOutboxTest transactions: Seq[GenericSignedTopologyTransaction], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { val num = transactions.size - outbox.newTransactionsAddedToAuthorizedStore(timestamp, num) + outbox.newTransactionsAdded(timestamp, num) } }) ), diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedDomainOutboxTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedDomainOutboxTest.scala index 5eb713a2e..f3c73584c 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedDomainOutboxTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/StoreBasedDomainOutboxTest.scala @@ -250,7 +250,7 @@ class StoreBasedDomainOutboxTest transactions: Seq[GenericSignedTopologyTransaction], )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = { val num = transactions.size - outbox.newTransactionsAddedToAuthorizedStore(timestamp, num) + outbox.newTransactionsAdded(timestamp, num) } }) ), diff --git a/community/testing/src/main/scala/com/digitalasset/canton/MockedNodeParameters.scala b/community/testing/src/main/scala/com/digitalasset/canton/MockedNodeParameters.scala index 042cc2dc0..727e8a239 100644 --- a/community/testing/src/main/scala/com/digitalasset/canton/MockedNodeParameters.scala +++ b/community/testing/src/main/scala/com/digitalasset/canton/MockedNodeParameters.scala @@ -43,6 +43,8 @@ object MockedNodeParameters { override def devVersionSupport: Boolean = ??? + override def betaVersionSupport: Boolean = ??? + override def dontWarnOnDeprecatedPV: Boolean = ??? 
   override def dbMigrateAndStart: Boolean = false
diff --git a/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala b/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala
index 50813dda4..1fea190e9 100644
--- a/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala
+++ b/community/testing/src/main/scala/com/digitalasset/canton/ProtocolVersionChecks.scala
@@ -44,7 +44,10 @@ trait ProtocolVersionChecksFixtureAnyWordSpec {
     def onlyRunWith(protocolVersion: ProtocolVersion): OnlyRunWhenWordSpecStringWrapper =
       new OnlyRunWhenWordSpecStringWrapper(verb, testedProtocolVersion == protocolVersion)
 
-    def onlyRunWithLessThan(
+    def onlyRunWhen(condition: ProtocolVersion => Boolean): OnlyRunWhenWordSpecStringWrapper =
+      new OnlyRunWhenWordSpecStringWrapper(verb, condition(testedProtocolVersion))
+
+    def onlyRunLessThan(
       minProtocolVersion: ProtocolVersion
     ): OnlyRunWhenWordSpecStringWrapper =
       new OnlyRunWhenWordSpecStringWrapper(verb, testedProtocolVersion < minProtocolVersion)
@@ -204,12 +207,17 @@ trait ProtocolVersionChecksAsyncWordSpec {
     def onlyRunWithOrGreaterThan(
       minProtocolVersion: ProtocolVersion
     ): OnlyRunWhenWordSpecStringWrapper =
-      new OnlyRunWhenWordSpecStringWrapper(verb, testedProtocolVersion >= minProtocolVersion)
+      onlyRunWhen(_ >= minProtocolVersion)
 
     def onlyRunWithOrLessThan(
       minProtocolVersion: ProtocolVersion
     ): OnlyRunWhenWordSpecStringWrapper =
-      new OnlyRunWhenWordSpecStringWrapper(verb, testedProtocolVersion <= minProtocolVersion)
+      onlyRunWhen(_ <= minProtocolVersion)
+
+    private def onlyRunWhen(
+      condition: ProtocolVersion => Boolean
+    ): OnlyRunWhenWordSpecStringWrapper =
+      new OnlyRunWhenWordSpecStringWrapper(verb, condition(testedProtocolVersion))
   }
 
   protected final class OnlyRunWhenWordSpecStringWrapper(
diff --git a/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala b/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala
index d7b50e9ec..727deb3dc 100644
--- a/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala
+++ b/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala
@@ -522,15 +522,16 @@ class TestingIdentityFactory(
     val partyId = PartyId.tryFromLfParty(lfParty)
     val participantsForParty = participants.iterator.filter(_._1.uid != partyId.uid)
     mkAdd(
-      PartyToParticipant(
-        partyId,
-        None,
-        threshold = PositiveInt.one,
-        participantsForParty.map { case (id, permission) =>
-          HostingParticipant(id, permission)
-        }.toSeq,
-        groupAddressing = false,
-      )
+      PartyToParticipant
+        .tryCreate(
+          partyId,
+          None,
+          threshold = PositiveInt.one,
+          participantsForParty.map { case (id, permission) =>
+            HostingParticipant(id, permission)
+          }.toSeq,
+          groupAddressing = false,
+        )
     )
   }
 
@@ -721,7 +722,7 @@ class TestingOwnerWithKeys(
     )
 
     val p1p1 = mkAdd(
-      PartyToParticipant(
+      PartyToParticipant.tryCreate(
         PartyId(UniqueIdentifier.tryCreate("one", key1.id)),
         None,
         PositiveInt.one,
diff --git a/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala b/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala
index 41efb6e88..544ea800d 100644
--- a/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala
+++ 
b/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala @@ -15,7 +15,10 @@ import io.opentelemetry.context.propagation.ContextPropagators import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter import io.opentelemetry.exporter.zipkin.ZipkinSpanExporter import io.opentelemetry.sdk.OpenTelemetrySdk -import io.opentelemetry.sdk.metrics.{SdkMeterProvider, SdkMeterProviderBuilder} +import io.opentelemetry.sdk.metrics.`export`.MetricReader +import io.opentelemetry.sdk.metrics.internal.SdkMeterProviderUtil +import io.opentelemetry.sdk.metrics.internal.`export`.CardinalityLimitSelector +import io.opentelemetry.sdk.metrics.{InstrumentType, SdkMeterProvider, SdkMeterProviderBuilder} import io.opentelemetry.sdk.trace.`export`.{ BatchSpanProcessor, BatchSpanProcessorBuilder, @@ -30,6 +33,23 @@ import scala.util.chaining.scalaUtilChainingOps object OpenTelemetryFactory { + def registerMetricsReaderWithCardinality( + builder: SdkMeterProviderBuilder, + reader: MetricReader, + cardinality: Int, + ): SdkMeterProviderBuilder = { + val cardinalityLimit = new CardinalityLimitSelector { + override def getCardinalityLimit(instrumentType: InstrumentType): Int = cardinality + } + SdkMeterProviderUtil + .registerMetricReaderWithCardinalitySelector( + builder, + reader, + cardinalityLimit, + ) + builder + } + def initializeOpenTelemetry( initializeGlobalOpenTelemetry: Boolean, testingSupportAdhocMetrics: Boolean, @@ -39,6 +59,7 @@ object OpenTelemetryFactory { histogramInventory: HistogramInventory, histogramFilter: MetricsInfoFilter, histogramConfigs: Seq[HistogramDefinition], + cardinality: Int, loggerFactory: NamedLoggerFactory, ): ConfiguredOpenTelemetry = { val logger: TracedLogger = loggerFactory.getTracedLogger(getClass) @@ -74,7 +95,10 @@ object OpenTelemetryFactory { .setSampler(sampler) def setMetricsReader: SdkMeterProviderBuilder => SdkMeterProviderBuilder = builder => - if (metricsEnabled) builder.registerMetricReader(onDemandMetricReader).pipe(attachReporters) + if (metricsEnabled) + registerMetricsReaderWithCardinality(builder, onDemandMetricReader, cardinality).pipe( + attachReporters + ) else builder val meterProviderBuilder = diff --git a/project/BuildCommon.scala b/project/BuildCommon.scala index ce53d0588..a18617bc4 100644 --- a/project/BuildCommon.scala +++ b/project/BuildCommon.scala @@ -729,7 +729,8 @@ object BuildCommon { scalaVersion, sbtVersion, BuildInfoKey("damlLibrariesVersion" -> Dependencies.daml_libraries_version), - BuildInfoKey("protocolVersions" -> List()), + BuildInfoKey("stableProtocolVersions" -> List()), + BuildInfoKey("betaProtocolVersions" -> List()), ), buildInfoPackage := "com.digitalasset.canton.buildinfo", buildInfoObject := "BuildInfo", diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 1809d26ce..1b4f8e90b 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -37,9 +37,9 @@ object Dependencies { lazy val chimney_version = "0.6.1" lazy val circe_version = "0.14.2" lazy val dropwizard_version = "4.1.33" - lazy val flyway_version = "10.12.0" + lazy val flyway_version = "10.15.0" lazy val gcp_kms_version = "2.36.0" - lazy val h2_version = "2.1.210" + lazy val h2_version = "2.2.224" lazy val janino_version = "3.1.12" lazy val javax_annotations_version = "1.3.2" lazy val log4j_version = "2.23.1" diff --git a/project/project/DamlVersions.scala b/project/project/DamlVersions.scala index 857e2c39b..bd25da7e8 100644 --- a/project/project/DamlVersions.scala +++ 
b/project/project/DamlVersions.scala @@ -7,7 +7,7 @@ object DamlVersions { /** The version of the daml compiler (and in most cases of the daml libraries as well). */ - val version: String = "3.1.0-snapshot.20240613.13124.0.v24e0f5e8" + val version: String = "3.1.0-snapshot.20240624.13145.0.v551f7a20" /** Custom Daml artifacts override version. */