From acc5a21c59543ea0cfd5a71913f8f6be87c0a96d Mon Sep 17 00:00:00 2001
From: Stefano Baghino <43749967+stefanobaghino-da@users.noreply.github.com>
Date: Wed, 27 May 2020 08:37:58 +0200
Subject: [PATCH] Fix issues caused by migrations V25 and V26 (#6107)

* Fix issues caused by migrations V25 and V26

Fixes #6017

Please note that migrating sandbox-classic from version 1.0.0 to 1.1.1 causes
undefined behavior if the result is used to back a running ledger. No migration
should stop at 1.1.1; migrate past it to the version that applies this fix, so
that the index database ends up in a consistent state.

Full write-up on the issue here: https://github.com/digital-asset/daml/issues/6017#issuecomment-634040972

changelog_begin

[Sandbox Classic] Fix an issue in migrating to version 1.1.1. If you migrated
to version 1.1.1, do not use the resulting setup; instead, migrate to this
version, which fixes the issue. See https://github.com/digital-asset/daml/issues/6017

changelog_end

* Add trailing newline

* Fix wrong hash for migration

* Address https://github.com/digital-asset/daml/pull/6107#discussion_r430469918
---
 .../V32_0__Drop_archived_contracts.sha256       |  1 +
 .../V32_0__Drop_archived_contracts.sql          | 18 ++++++
 .../postgres/V28__Fix_key_hashes.scala          | 50 ++--------------
 .../postgres/V32_1__Fix_key_hashes.scala        | 57 +++++++++++++++++++
 4 files changed, 81 insertions(+), 45 deletions(-)
 create mode 100644 ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sha256
 create mode 100644 ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sql
 create mode 100644 ledger/sandbox/src/main/scala/db/migration/postgres/V32_1__Fix_key_hashes.scala

diff --git a/ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sha256 b/ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sha256
new file mode 100644
index 000000000000..4918bbbbb83b
--- /dev/null
+++ b/ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sha256
@@ -0,0 +1 @@
+4ebb7ee7851f7f3d1874e04eee91620f8c3a3716ce34e5c3159e71e17bae490d
diff --git a/ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sql b/ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sql
new file mode 100644
index 000000000000..eb0e9d6c3532
--- /dev/null
+++ b/ledger/sandbox/src/main/resources/db/migration/postgres/V32_0__Drop_archived_contracts.sql
@@ -0,0 +1,18 @@
+-- Copyright (c) 2019 The DAML Authors. All rights reserved.
+-- SPDX-License-Identifier: Apache-2.0
+
+---------------------------------------------------------------------------------------------------
+-- V32.0: Drop archived contracts (see https://github.com/digital-asset/daml/issues/6017)
+--
+-- V26.0 mistakenly added data for archived contracts to the participant_contracts table.
+-- Since that table should _only_ be used for execution and validation, it must contain
+-- only the most up-to-date state of active contracts.
+--
+---------------------------------------------------------------------------------------------------
+
+-- To remove the archived contracts, we rely on the consuming events in the participant_events
+-- table and delete from both participant_contract_witnesses and participant_contracts all
+-- rows related to contract_ids that have been subject to a consuming event.
+
+delete from participant_contract_witnesses where contract_id in (select contract_id from participant_events where exercise_consuming);
+delete from participant_contracts where contract_id in (select contract_id from participant_events where exercise_consuming);
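A minimal sanity check for V32.0 (an illustrative sketch, not part of the changed files): after the two deletes, no contract that was subject to a consuming event should remain in either table, so both counts are expected to be 0.

    select count(*) from participant_contracts
    where contract_id in (select contract_id from participant_events where exercise_consuming);

    select count(*) from participant_contract_witnesses
    where contract_id in (select contract_id from participant_events where exercise_consuming);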
diff --git a/ledger/sandbox/src/main/scala/db/migration/postgres/V28__Fix_key_hashes.scala b/ledger/sandbox/src/main/scala/db/migration/postgres/V28__Fix_key_hashes.scala
index 5b20d6d212bc..19ce390ea6ca 100644
--- a/ledger/sandbox/src/main/scala/db/migration/postgres/V28__Fix_key_hashes.scala
+++ b/ledger/sandbox/src/main/scala/db/migration/postgres/V28__Fix_key_hashes.scala
@@ -3,55 +3,15 @@
 
 package db.migration.postgres
 
-import com.daml.lf.data.Ref
-import com.daml.lf.transaction.Node.GlobalKey
-import com.daml.platform.store.serialization.ValueSerializer
 import org.flywaydb.core.api.migration.{BaseJavaMigration, Context}
 
 final class V28__Fix_key_hashes extends BaseJavaMigration {
-  private val SELECT_KEYS =
-    "select contract_id, template_id, create_key_value from participant_events where create_key_value is not null and create_consumed_at is null"
-
-  private val FIX_HASH =
-    "update participant_contracts set create_key_hash = ? where contract_id = ?"
-
   override def migrate(context: Context): Unit = {
-    val conn = context.getConnection
-    var selectKeys: java.sql.Statement = null
-    var fixHash: java.sql.PreparedStatement = null
-    var keysRows: java.sql.ResultSet = null
-    try {
-      fixHash = conn.prepareStatement(FIX_HASH)
-
-      selectKeys = conn.createStatement()
-      keysRows = selectKeys.executeQuery(SELECT_KEYS)
-
-      while (keysRows.next()) {
-        val contractId = keysRows.getString("contract_id")
-        val rawTemplateId = keysRows.getString("template_id")
-        val templateId = Ref.Identifier.assertFromString(rawTemplateId)
-        val rawKeyValue = keysRows.getBinaryStream("create_key_value")
-        val keyValue = ValueSerializer.deserializeValue(rawKeyValue)
-        val key = GlobalKey.assertBuild(templateId, keyValue.value)
-        val hashBytes = key.hash.bytes.toInputStream
-
-        fixHash.setBinaryStream(1, hashBytes)
-        fixHash.setString(2, contractId)
-        fixHash.addBatch()
-      }
-      val _ = fixHash.executeBatch()
-
-    } finally {
-      if (selectKeys != null) {
-        selectKeys.close()
-      }
-      if (fixHash != null) {
-        fixHash.close()
-      }
-      if (keysRows != null) {
-        keysRows.close()
-      }
-    }
+    // Content of this migration moved to V32_1 (see https://github.com/digital-asset/daml/issues/6017).
+    // This does not break if someone already executed this migration,
+    // because Flyway does not keep checksums for Java-based migrations.
+    // The only downside is that the operation is performed a second time
+    // without a real need, but it is safe to do so.
   }
 }
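The comment in the emptied V28 relies on Flyway recording no checksum for Java-based migrations, so changing their bodies does not invalidate databases on which they already ran. A quick way to confirm this on a concrete setup (a sketch; the history table is named flyway_schema_history in recent Flyway versions and schema_version in older ones):

    -- Java (JDBC) migrations are recorded with a null checksum,
    -- so the emptied V28 still matches its history entry.
    select version, type, checksum
    from flyway_schema_history
    where version in ('28', '32.1');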
diff --git a/ledger/sandbox/src/main/scala/db/migration/postgres/V32_1__Fix_key_hashes.scala b/ledger/sandbox/src/main/scala/db/migration/postgres/V32_1__Fix_key_hashes.scala
new file mode 100644
index 000000000000..b541c8f9cdb6
--- /dev/null
+++ b/ledger/sandbox/src/main/scala/db/migration/postgres/V32_1__Fix_key_hashes.scala
@@ -0,0 +1,57 @@
+// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package db.migration.postgres
+
+import com.daml.lf.data.Ref
+import com.daml.lf.transaction.Node.GlobalKey
+import com.daml.platform.store.serialization.ValueSerializer
+import org.flywaydb.core.api.migration.{BaseJavaMigration, Context}
+
+final class V32_1__Fix_key_hashes extends BaseJavaMigration {
+
+  private val SELECT_KEYS =
+    "select participant_events.contract_id, participant_events.template_id, create_key_value from participant_events inner join participant_contracts on participant_events.contract_id = participant_contracts.contract_id where create_key_value is not null"
+
+  private val FIX_HASH =
+    "update participant_contracts set create_key_hash = ? where contract_id = ?"
+
+  override def migrate(context: Context): Unit = {
+    val conn = context.getConnection
+    var selectKeys: java.sql.Statement = null
+    var fixHash: java.sql.PreparedStatement = null
+    var keysRows: java.sql.ResultSet = null
+    try {
+      fixHash = conn.prepareStatement(FIX_HASH)
+
+      selectKeys = conn.createStatement()
+      keysRows = selectKeys.executeQuery(SELECT_KEYS)
+
+      while (keysRows.next()) {
+        val contractId = keysRows.getString("contract_id")
+        val rawTemplateId = keysRows.getString("template_id")
+        val templateId = Ref.Identifier.assertFromString(rawTemplateId)
+        val rawKeyValue = keysRows.getBinaryStream("create_key_value")
+        val keyValue = ValueSerializer.deserializeValue(rawKeyValue)
+        val key = GlobalKey.assertBuild(templateId, keyValue.value)
+        val hashBytes = key.hash.bytes.toInputStream
+
+        fixHash.setBinaryStream(1, hashBytes)
+        fixHash.setString(2, contractId)
+        fixHash.addBatch()
+      }
+      val _ = fixHash.executeBatch()
+
+    } finally {
+      if (keysRows != null) {
+        keysRows.close()
+      }
+      if (selectKeys != null) {
+        selectKeys.close()
+      }
+      if (fixHash != null) {
+        fixHash.close()
+      }
+    }
+  }
+}
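Unlike V28, which selected keys from participant_events filtering on create_consumed_at, V32_1 joins participant_events against participant_contracts, so it recomputes create_key_hash only for contracts still present after V32.0 dropped the archived ones. A hypothetical post-migration check, using only columns referenced above: every remaining contract created with a key should now carry a hash.

    -- expected to return 0 after V32.1 has run
    select count(*)
    from participant_contracts c
    join participant_events e on c.contract_id = e.contract_id
    where e.create_key_value is not null
      and c.create_key_hash is null;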