
Commit b3f8906

Merge branch 'develop'
jgainerdewar committed Jul 28, 2022
2 parents 432bcba + 5b3b664 commit b3f8906
Showing 29 changed files with 632 additions and 52 deletions.
9 changes: 9 additions & 0 deletions build.sbt
@@ -91,6 +91,12 @@ lazy val cloudSupport = project
.dependsOn(common)
.dependsOn(common % "test->test")

lazy val azureBlobFileSystem = (project in file("filesystems/blob"))
.withLibrarySettings("cromwell-azure-blobFileSystem", blobFileSystemDependencies)
.dependsOn(core)
.dependsOn(core % "test->test")
.dependsOn(common % "test->test")

lazy val awsS3FileSystem = (project in file("filesystems/s3"))
.withLibrarySettings("cromwell-aws-s3filesystem", s3FileSystemDependencies)
.dependsOn(core)
@@ -249,10 +255,12 @@ lazy val engine = project
.dependsOn(drsFileSystem)
.dependsOn(sraFileSystem)
.dependsOn(awsS3FileSystem)
.dependsOn(azureBlobFileSystem)
.dependsOn(awsS3FileSystem % "test->test")
.dependsOn(drsFileSystem % "test->test")
.dependsOn(httpFileSystem % "test->test")
.dependsOn(ftpFileSystem % "test->test")
.dependsOn(azureBlobFileSystem % "test->test")
.dependsOn(`cloud-nio-spi`)
.dependsOn(languageFactoryCore)
.dependsOn(cwlV1_0LanguageFactory % "test->test")
@@ -391,6 +399,7 @@ lazy val root = (project in file("."))
.aggregate(`cromwell-drs-localizer`)
.aggregate(awsBackend)
.aggregate(awsS3FileSystem)
.aggregate(azureBlobFileSystem)
.aggregate(backend)
.aggregate(centaur)
.aggregate(centaurCwlRunner)
5 changes: 5 additions & 0 deletions database/migration/src/main/resources/changelog.xml
@@ -82,6 +82,11 @@
<include file="changesets/mariadb_engine_schema.xml" relativeToChangelogFile="true" />
<include file="changesets/resync_engine_schema.xml" relativeToChangelogFile="true" />
<include file="changesets/enlarge_job_store_ids.xml" relativeToChangelogFile="true" />
<include file="changesets/enlarge_call_cache_entry_ids.xml" relativeToChangelogFile="true" />
<include file="changesets/enlarge_call_caching_aggregation_entry_id.xml" relativeToChangelogFile="true" />
<include file="changesets/enlarge_call_caching_detritus_entry_id.xml" relativeToChangelogFile="true" />
<include file="changesets/enlarge_call_caching_simpleton_entry_id.xml" relativeToChangelogFile="true" />
<include file="changesets/reset_call_caching_hash_entry_id_autoincrement.xml" relativeToChangelogFile="true" />
<!-- REMINDER!
Before appending here, did you remember to include the 'objectQuotingStrategy="QUOTE_ALL_OBJECTS"' line in your changeset/xyz.xml...?
-->
@@ -0,0 +1,195 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<databaseChangeLog objectQuotingStrategy="QUOTE_ALL_OBJECTS"
xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.3.xsd">

<!-- BEGIN dropping FKs -->
<!-- Drop the foreign key constraint from CALL_CACHING_AGGREGATION_ENTRY to CALL_CACHING_ENTRY to allow for the latter's PK to be widened. -->
<changeSet author="sshah" id="drop_call_caching_aggregation_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<dropForeignKeyConstraint
baseTableName="CALL_CACHING_AGGREGATION_ENTRY"
constraintName="FK_CALL_CACHING_AGGREGATION_ENTRY_CALL_CACHING_ENTRY_ID"
/>
</changeSet>

<!-- Drop the foreign key constraint from CALL_CACHING_DETRITUS_ENTRY to CALL_CACHING_ENTRY to allow for the latter's PK to be widened. -->
<changeSet author="sshah" id="drop_call_caching_detritus_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<dropForeignKeyConstraint
baseTableName="CALL_CACHING_DETRITUS_ENTRY"
constraintName="FK_CALL_CACHING_DETRITUS_ENTRY_CALL_CACHING_ENTRY_ID"
/>
</changeSet>

<!-- Drop the foreign key constraint from CALL_CACHING_HASH_ENTRY to CALL_CACHING_ENTRY to allow for the latter's PK to be widened. -->
<changeSet author="sshah" id="drop_call_caching_hash_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<dropForeignKeyConstraint
baseTableName="CALL_CACHING_HASH_ENTRY"
constraintName="FK_CALL_CACHING_HASH_ENTRY_CALL_CACHING_ENTRY_ID"
/>
</changeSet>

<!-- Drop the foreign key constraint from CALL_CACHING_SIMPLETON_ENTRY to CALL_CACHING_ENTRY to allow for the latter's PK to be widened. -->
<changeSet author="sshah" id="drop_call_caching_simpleton_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<dropForeignKeyConstraint
baseTableName="CALL_CACHING_SIMPLETON_ENTRY"
constraintName="FK_CALL_CACHING_SIMPLETON_ENTRY_CALL_CACHING_ENTRY_ID"
/>
</changeSet>
<!-- END dropping FKs -->

<!-- BEGIN CALL_CACHING_ENTRY_ID PK widening -->
<!-- For the HSQLDB and Postgres databases there are two changesets: one to modify the table column type, and another to alter the autoincrementing sequence.
The other DBs are handled by a single addAutoIncrement changeset. The autoincrement start is set at 20,000,000,000. -->
<changeSet author="sshah" id="enlarge_call_cache_entry_id" dbms="hsqldb">
<modifyDataType
columnName="CALL_CACHING_ENTRY_ID"
tableName="CALL_CACHING_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="reset_call_cache_entry_id_autoincrement" dbms="mysql,hsqldb,mariadb">
<addAutoIncrement
columnName="CALL_CACHING_ENTRY_ID"
columnDataType="BIGINT"
incrementBy="1"
startWith="20000000000"
tableName="CALL_CACHING_ENTRY"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_cache_entry_id" dbms="postgresql">
<modifyDataType
columnName="CALL_CACHING_ENTRY_ID"
tableName="CALL_CACHING_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_cache_entry_id_seq" dbms="postgresql">
<preConditions onFail="MARK_RAN">
<!-- Idempotency check (no-op if the sequence is present and already consistent with what the alter would do) -->
<sqlCheck expectedResult="0">
SELECT count(*)
FROM information_schema.sequences
WHERE sequence_name = 'CALL_CACHING_ENTRY_CALL_CACHING_ENTRY_ID_seq'
AND data_type = 'bigint';
</sqlCheck>
</preConditions>
<sql>ALTER SEQUENCE "CALL_CACHING_ENTRY_CALL_CACHING_ENTRY_ID_seq" as bigint RESTART WITH 20000000000;</sql>
</changeSet>
<!-- END CALL_CACHING_ENTRY PK widening -->

<!-- BEGIN widening FKs to match PK -->
<changeSet author="sshah" id="enlarge_call_caching_aggregation_entry_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<modifyDataType
tableName="CALL_CACHING_AGGREGATION_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="enlarge_call_caching_detritus_entry_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<modifyDataType
tableName="CALL_CACHING_DETRITUS_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="enlarge_call_caching_hash_entry_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<modifyDataType
tableName="CALL_CACHING_HASH_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="enlarge_call_caching_simpleton_entry_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<modifyDataType
tableName="CALL_CACHING_SIMPLETON_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
newDataType="BIGINT"
/>
</changeSet>
<!-- END widening FKs to match PK -->

<!-- MariaDB's FK NotNull constraint does not survive the widening above and must be recreated explicitly. -->
<!-- BEGIN Restoring FK NotNull constraint -->
<changeSet author="sshah" id="mariadb_not_null_constraint_call_caching_aggregation_entry_fk" dbms="mariadb,mysql">
<addNotNullConstraint
tableName="CALL_CACHING_AGGREGATION_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
columnDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="mariadb_not_null_constraint_call_caching_detritus_entry_fk" dbms="mariadb">
<addNotNullConstraint
tableName="CALL_CACHING_DETRITUS_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
columnDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="mariadb_not_null_constraint_call_caching_hash_entry_fk" dbms="mariadb">
<addNotNullConstraint
tableName="CALL_CACHING_HASH_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
columnDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="mariadb_not_null_constraint_call_caching_simpleton_entry_fk" dbms="mariadb">
<addNotNullConstraint
tableName="CALL_CACHING_SIMPLETON_ENTRY"
columnName="CALL_CACHING_ENTRY_ID"
columnDataType="BIGINT"
/>
</changeSet>
<!-- END Restoring FK NotNull constraint -->

<!-- BEGIN Restoring the FKs -->
<changeSet author="sshah" id="recreate_call_caching_aggregation_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<addForeignKeyConstraint
constraintName="FK_CALL_CACHING_AGGREGATION_ENTRY_CALL_CACHING_ENTRY_ID"
baseColumnNames="CALL_CACHING_ENTRY_ID"
baseTableName="CALL_CACHING_AGGREGATION_ENTRY"
referencedTableName="CALL_CACHING_ENTRY"
referencedColumnNames="CALL_CACHING_ENTRY_ID"
/>
</changeSet>

<changeSet author="sshah" id="recreate_call_caching_detritus_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<addForeignKeyConstraint
constraintName="FK_CALL_CACHING_DETRITUS_ENTRY_CALL_CACHING_ENTRY_ID"
baseColumnNames="CALL_CACHING_ENTRY_ID"
baseTableName="CALL_CACHING_DETRITUS_ENTRY"
referencedTableName="CALL_CACHING_ENTRY"
referencedColumnNames="CALL_CACHING_ENTRY_ID"
/>
</changeSet>

<changeSet author="sshah" id="recreate_call_caching_hash_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<addForeignKeyConstraint
constraintName="FK_CALL_CACHING_HASH_ENTRY_CALL_CACHING_ENTRY_ID"
baseColumnNames="CALL_CACHING_ENTRY_ID"
baseTableName="CALL_CACHING_HASH_ENTRY"
referencedTableName="CALL_CACHING_ENTRY"
referencedColumnNames="CALL_CACHING_ENTRY_ID"
/>
</changeSet>

<changeSet author="sshah" id="recreate_call_caching_simpleton_entry_call_caching_entry_id_fk" dbms="mysql,hsqldb,postgresql,mariadb">
<addForeignKeyConstraint
constraintName="FK_CALL_CACHING_SIMPLETON_ENTRY_CALL_CACHING_ENTRY_ID"
baseColumnNames="CALL_CACHING_ENTRY_ID"
baseTableName="CALL_CACHING_SIMPLETON_ENTRY"
referencedTableName="CALL_CACHING_ENTRY"
referencedColumnNames="CALL_CACHING_ENTRY_ID"
/>
</changeSet>
<!-- END Restoring the FKs -->

</databaseChangeLog>
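
As a rough illustration of the drop-then-widen-then-restore pattern in the changesets above, the DDL Liquibase generates for MySQL/MariaDB is approximately the following. This is a sketch for one child table only; the exact statements depend on the Liquibase version and dialect, and the same steps repeat for the detritus, hash, and simpleton tables.

-- Sketch only: approximate MySQL/MariaDB equivalent of the changesets above.
-- Drop the FK that references CALL_CACHING_ENTRY so its PK can be widened.
ALTER TABLE CALL_CACHING_AGGREGATION_ENTRY
  DROP FOREIGN KEY FK_CALL_CACHING_AGGREGATION_ENTRY_CALL_CACHING_ENTRY_ID;

-- Widen the PK to BIGINT and move the auto-increment start past the INT range.
ALTER TABLE CALL_CACHING_ENTRY
  MODIFY COLUMN CALL_CACHING_ENTRY_ID BIGINT NOT NULL AUTO_INCREMENT;
ALTER TABLE CALL_CACHING_ENTRY AUTO_INCREMENT = 20000000000;

-- Widen the referencing column, restore NOT NULL (lost on MariaDB), and recreate the FK.
ALTER TABLE CALL_CACHING_AGGREGATION_ENTRY
  MODIFY COLUMN CALL_CACHING_ENTRY_ID BIGINT NOT NULL;
ALTER TABLE CALL_CACHING_AGGREGATION_ENTRY
  ADD CONSTRAINT FK_CALL_CACHING_AGGREGATION_ENTRY_CALL_CACHING_ENTRY_ID
  FOREIGN KEY (CALL_CACHING_ENTRY_ID) REFERENCES CALL_CACHING_ENTRY (CALL_CACHING_ENTRY_ID);

-- The same three steps repeat for CALL_CACHING_DETRITUS_ENTRY, CALL_CACHING_HASH_ENTRY,
-- and CALL_CACHING_SIMPLETON_ENTRY.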
@@ -0,0 +1,50 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<databaseChangeLog objectQuotingStrategy="QUOTE_ALL_OBJECTS"
xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.3.xsd">

<!-- BEGIN CALL_CACHING_AGGREGATION_ENTRY PK widening -->
<!-- For the HSQLDB and Postgres databases there are two changesets: one to modify the table column type, and another to alter the autoincrementing sequence.
The other DBs are handled by a single addAutoIncrement changeset. The autoincrement start is set at 20,000,000,000. -->
<changeSet author="sshah" id="enlarge_call_cache_aggregation_entry_id" dbms="hsqldb">
<modifyDataType
columnName="CALL_CACHING_AGGREGATION_ENTRY_ID"
tableName="CALL_CACHING_AGGREGATION_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="reset_call_caching_aggregation_entry_id_autoincrement" dbms="mysql,hsqldb,mariadb">
<addAutoIncrement
columnName="CALL_CACHING_AGGREGATION_ENTRY_ID"
columnDataType="BIGINT"
incrementBy="1"
startWith="20000000000"
tableName="CALL_CACHING_AGGREGATION_ENTRY"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_caching_aggregation_entry_id" dbms="postgresql">
<modifyDataType
columnName="CALL_CACHING_AGGREGATION_ENTRY_ID"
tableName="CALL_CACHING_AGGREGATION_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_caching_aggregation_entry_id_seq" dbms="postgresql">
<preConditions onFail="MARK_RAN">
<!-- Idempotency check (no-op if the sequence is present and already consistent with what the alter would do) -->
<sqlCheck expectedResult="0">
SELECT count(*)
FROM information_schema.sequences
WHERE sequence_name = 'CALL_CACHING_AGGREGATION_ENTR_CALL_CACHING_AGGREGATION_ENTR_seq'
AND data_type = 'bigint';
</sqlCheck>
</preConditions>
<sql>ALTER SEQUENCE "CALL_CACHING_AGGREGATION_ENTR_CALL_CACHING_AGGREGATION_ENTR_seq" as bigint RESTART WITH 20000000000;</sql>
</changeSet>
<!-- END CALL_CACHING_AGGREGATION_ENTRY PK widening -->

</databaseChangeLog>
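
On PostgreSQL, the pair of changesets above amounts to roughly the following DDL. This is a sketch; Liquibase's actual output may differ, and the sequence name is the one referenced in the sqlCheck above.

-- Sketch only: approximate PostgreSQL equivalent of the two changesets above.
ALTER TABLE "CALL_CACHING_AGGREGATION_ENTRY"
  ALTER COLUMN "CALL_CACHING_AGGREGATION_ENTRY_ID" TYPE BIGINT;
ALTER SEQUENCE "CALL_CACHING_AGGREGATION_ENTR_CALL_CACHING_AGGREGATION_ENTR_seq"
  AS BIGINT RESTART WITH 20000000000;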
@@ -0,0 +1,50 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<databaseChangeLog objectQuotingStrategy="QUOTE_ALL_OBJECTS"
xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.3.xsd">

<!-- BEGIN CALL_CACHING_DETRITUS_ENTRY PK widening -->
<!-- For the HSQLDB and Postgres databases there are two changesets: one to modify the table column type, and another to alter the autoincrementing sequence.
The other DBs are handled by a single addAutoIncrement changeset. The autoincrement start is set at 20,000,000,000. -->
<changeSet author="sshah" id="enlarge_call_cache_detritus_entry_id" dbms="hsqldb">
<modifyDataType
columnName="CALL_CACHING_DETRITUS_ENTRY_ID"
tableName="CALL_CACHING_DETRITUS_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="reset_call_caching_detritus_entry_id_autoincrement" dbms="mysql,hsqldb,mariadb">
<addAutoIncrement
columnName="CALL_CACHING_DETRITUS_ENTRY_ID"
columnDataType="BIGINT"
incrementBy="1"
startWith="20000000000"
tableName="CALL_CACHING_DETRITUS_ENTRY"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_caching_detritus_entry_id" dbms="postgresql">
<modifyDataType
columnName="CALL_CACHING_DETRITUS_ENTRY_ID"
tableName="CALL_CACHING_DETRITUS_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_caching_detritus_entry_id_seq" dbms="postgresql">
<preConditions onFail="MARK_RAN">
<!-- Idempotency check (no-op if the sequence is present and already consistent with what the alter would do) -->
<sqlCheck expectedResult="0">
SELECT count(*)
FROM information_schema.sequences
WHERE sequence_name = 'CALL_CACHING_DETRITUS_ENTRY_CALL_CACHING_DETRITUS_ENTRY_ID_seq'
AND data_type = 'bigint';
</sqlCheck>
</preConditions>
<sql>ALTER SEQUENCE "CALL_CACHING_DETRITUS_ENTRY_CALL_CACHING_DETRITUS_ENTRY_ID_seq" as bigint RESTART WITH 20000000000;</sql>
</changeSet>
<!-- END CALL_CACHING_DETRITUS_ENTRY PK widening -->

</databaseChangeLog>
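
For HSQLDB, the modifyDataType changeset plus the shared addAutoIncrement changeset above correspond roughly to this DDL (a sketch; actual Liquibase output may differ):

-- Sketch only: approximate HSQLDB equivalent of the changesets above.
ALTER TABLE "CALL_CACHING_DETRITUS_ENTRY"
  ALTER COLUMN "CALL_CACHING_DETRITUS_ENTRY_ID" SET DATA TYPE BIGINT;
ALTER TABLE "CALL_CACHING_DETRITUS_ENTRY"
  ALTER COLUMN "CALL_CACHING_DETRITUS_ENTRY_ID" RESTART WITH 20000000000;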
@@ -0,0 +1,50 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<databaseChangeLog objectQuotingStrategy="QUOTE_ALL_OBJECTS"
xmlns="http://www.liquibase.org/xml/ns/dbchangelog"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.liquibase.org/xml/ns/dbchangelog http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.3.xsd">

<!-- BEGIN CALL_CACHING_SIMPLETON_ENTRY PK widening -->
<!-- For the HSQLDB and Postgres databases there are two changesets: one to modify the table column type, and another to alter the autoincrementing sequence.
The other DBs are handled by a single addAutoIncrement changeset. The autoincrement start is set at 20,000,000,000. -->
<changeSet author="sshah" id="enlarge_call_cache_simpleton_entry_id" dbms="hsqldb">
<modifyDataType
columnName="CALL_CACHING_SIMPLETON_ENTRY_ID"
tableName="CALL_CACHING_SIMPLETON_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="reset_call_caching_simpleton_entry_id_autoincrement" dbms="mysql,hsqldb,mariadb">
<addAutoIncrement
columnName="CALL_CACHING_SIMPLETON_ENTRY_ID"
columnDataType="BIGINT"
incrementBy="1"
startWith="20000000000"
tableName="CALL_CACHING_SIMPLETON_ENTRY"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_caching_simpleton_entry_id" dbms="postgresql">
<modifyDataType
columnName="CALL_CACHING_SIMPLETON_ENTRY_ID"
tableName="CALL_CACHING_SIMPLETON_ENTRY"
newDataType="BIGINT"
/>
</changeSet>

<changeSet author="sshah" id="postgresql_enlarge_call_caching_simpleton_entry_id_seq" dbms="postgresql">
<preConditions onFail="MARK_RAN">
<!-- Idempotency check (no-op if the sequence is present and already consistent with what the alter would do) -->
<sqlCheck expectedResult="0">
SELECT count(*)
FROM information_schema.sequences
WHERE sequence_name = 'CALL_CACHING_SIMPLETON_ENTRY_CALL_CACHING_SIMPLETON_ENTRY_I_seq'
AND data_type = 'bigint';
</sqlCheck>
</preConditions>
<sql>ALTER SEQUENCE "CALL_CACHING_SIMPLETON_ENTRY_CALL_CACHING_SIMPLETON_ENTRY_I_seq" as bigint RESTART WITH 20000000000;</sql>
</changeSet>
<!-- END CALL_CACHING_SIMPLETON_ENTRY PK widening -->

</databaseChangeLog>
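
The PostgreSQL preconditions above make each sequence alteration idempotent. A manual check along the same lines, listing any call caching sequences that are still INT-backed and would therefore still be altered, might look like this (a sketch for PostgreSQL only):

-- Sketch only: manual PostgreSQL check mirroring the sqlCheck preconditions above.
SELECT sequence_name, data_type
FROM information_schema.sequences
WHERE sequence_name LIKE 'CALL_CACHING%'
  AND data_type <> 'bigint';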