CDC stands for Change Data Capture, a broad concept: any tool that captures changed data can be called CDC. Flink CDC is a log-based change data capture tool that can capture both full (snapshot) data and incremental changes. Taking MySQL as an example, it captures Binlog data through Debezium, processes it in real time, and sends it to the data lake, where the data can then be queried by other engines.

This section shows how to ingest one table or multiple tables into the data lake in both the Iceberg format and the Mixed-Iceberg format.

Ingest into one table

Iceberg format

The following example shows how to write MySQL CDC data to an Iceberg table.

Requirements

Please add the Flink SQL Connector MySQL CDC and Iceberg JARs to the lib directory of the Flink engine package.

  -- MySQL CDC source table reading the Binlog of mydb.products
  CREATE TABLE products (
      id INT,
      name STRING,
      description STRING,
      PRIMARY KEY (id) NOT ENFORCED
  ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'localhost',
      'port' = '3306',
      'username' = 'root',
      'password' = '123456',
      'database-name' = 'mydb',
      'table-name' = 'products'
  );

  -- Iceberg catalog backed by a Hadoop warehouse
  CREATE CATALOG iceberg_hadoop_catalog WITH (
      'type'='iceberg',
      'catalog-type'='hadoop',
      'warehouse'='hdfs://nn:8020/warehouse/path',
      'property-version'='1'
  );

  -- target Iceberg table
  CREATE TABLE IF NOT EXISTS `iceberg_hadoop_catalog`.`default`.`sample` (
      id INT,
      name STRING,
      description STRING,
      PRIMARY KEY (id) NOT ENFORCED
  );

  -- start the streaming sync job
  INSERT INTO `iceberg_hadoop_catalog`.`default`.`sample` SELECT * FROM products;
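
The Iceberg sink only makes data visible when a Flink checkpoint completes, so checkpointing must be enabled before running the INSERT above. If it is not already configured for your cluster, it can be set from the SQL client as shown below (the 60 s interval is only an example value):

  SET 'execution.checkpointing.interval' = '60s';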

Mixed-Iceberg format

The following example shows how to write MySQL CDC data to a Mixed-Iceberg table.

Requirements

Please add the Flink SQL Connector MySQL CDC and Amoro JARs to the lib directory of the Flink engine package.

  -- MySQL CDC source table reading the Binlog of mydb.products
  CREATE TABLE products (
      id INT,
      name STRING,
      description STRING,
      PRIMARY KEY (id) NOT ENFORCED
  ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'localhost',
      'port' = '3306',
      'username' = 'root',
      'password' = '123456',
      'database-name' = 'mydb',
      'table-name' = 'products'
  );

  -- Amoro catalog pointing at the Amoro metastore
  CREATE CATALOG arctic_catalog WITH (
      'type'='arctic',
      'metastore.url'='thrift://<ip>:<port>/<catalog_name_in_metastore>'
  );

  -- target Mixed-Iceberg table
  CREATE TABLE IF NOT EXISTS `arctic_catalog`.`db`.`test_tb` (
      id INT,
      name STRING,
      description STRING,
      PRIMARY KEY (id) NOT ENFORCED
  );

  -- start the streaming sync job
  INSERT INTO `arctic_catalog`.`db`.`test_tb` SELECT * FROM products;
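
Once the job is running, a quick way to sanity-check the ingestion is to query the target table from a Flink SQL session (a minimal check; depending on your execution mode and catalog configuration, additional read options may be needed):

  SELECT * FROM `arctic_catalog`.`db`.`test_tb`;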

Ingest into multiple tables

Iceberg format

The following example shows how to write CDC data from multiple MySQL tables into the corresponding Iceberg tables.

Requirements

Please add Flink Connector MySQL CDC and Iceberg dependencies to your Maven project’s pom.xml file.

  import com.ververica.cdc.connectors.mysql.source.MySqlSource;
  import com.ververica.cdc.connectors.mysql.table.MySqlDeserializationConverterFactory;
  import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
  import com.ververica.cdc.debezium.table.MetadataConverter;
  import com.ververica.cdc.debezium.table.RowDataDebeziumDeserializeSchema;
  import org.apache.flink.api.common.eventtime.WatermarkStrategy;
  import org.apache.flink.api.common.typeinfo.TypeInformation;
  import org.apache.flink.api.java.tuple.Tuple2;
  import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
  import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
  import org.apache.flink.streaming.api.functions.ProcessFunction;
  import org.apache.flink.table.api.*;
  import org.apache.flink.table.catalog.*;
  import org.apache.flink.table.data.RowData;
  import org.apache.flink.table.data.conversion.RowRowConverter;
  import org.apache.flink.table.data.utils.JoinedRowData;
  import org.apache.flink.table.types.logical.RowType;
  import org.apache.flink.util.Collector;
  import org.apache.flink.util.OutputTag;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.iceberg.CatalogProperties;
  import org.apache.iceberg.Table;
  import org.apache.iceberg.catalog.Catalog;
  import org.apache.iceberg.catalog.Namespace;
  import org.apache.iceberg.catalog.TableIdentifier;
  import org.apache.iceberg.flink.CatalogLoader;
  import org.apache.iceberg.flink.TableLoader;
  import org.apache.iceberg.flink.sink.FlinkSink;
  import org.apache.iceberg.relocated.com.google.common.collect.Maps;
  import org.apache.kafka.connect.data.Struct;
  import org.apache.kafka.connect.source.SourceRecord;

  import java.util.*;
  import java.util.stream.Collectors;
  import java.util.stream.Stream;

  import static com.ververica.cdc.connectors.mysql.table.MySqlReadableMetadata.DATABASE_NAME;
  import static com.ververica.cdc.connectors.mysql.table.MySqlReadableMetadata.TABLE_NAME;
  import static java.util.stream.Collectors.toMap;

  public class MySqlCDC2IcebergExample {

      public static void main(String[] args) throws Exception {
          List<Tuple2<ObjectPath, ResolvedCatalogTable>> pathAndTable = initSourceTables();
          Map<String, RowDataDebeziumDeserializeSchema> debeziumDeserializeSchemas =
              getDebeziumDeserializeSchemas(pathAndTable);
          MySqlSource<RowData> mySqlSource = MySqlSource.<RowData>builder()
              .hostname("yourHostname")
              .port(yourPort)
              .databaseList("test_db")
              // set up the tables to be captured
              .tableList("test_db.user", "test_db.product")
              .username("yourUsername")
              .password("yourPassword")
              .deserializer(new CompositeDebeziumDeserializationSchema(debeziumDeserializeSchemas))
              .build();

          StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
          // enable checkpointing; the Iceberg sink commits data when a checkpoint completes
          env.enableCheckpointing(60000);

          // split the CDC stream into side outputs keyed by table name
          SingleOutputStreamOperator<Void> process = env
              .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
              .setParallelism(4)
              .process(new SplitCdcStreamFunction(pathAndTable.stream()
                  .collect(toMap(e -> e.f0.toString(),
                      e -> RowRowConverter.create(e.f1.getResolvedSchema().toPhysicalRowDataType())))))
              .name("split stream");

          // create an Iceberg sink per table and write the CDC data
          Map<String, String> properties = Maps.newHashMap();
          properties.put(CatalogProperties.WAREHOUSE_LOCATION, "yourWarehouseLocation");
          properties.put(CatalogProperties.URI, "yourThriftUri");
          CatalogLoader catalogLoader = CatalogLoader.hadoop("hadoop_catalog", new Configuration(), properties);
          Catalog icebergHadoopCatalog = catalogLoader.loadCatalog();

          Map<String, TableSchema> sinkTableSchemas = new HashMap<>();
          sinkTableSchemas.put("user", TableSchema.builder().field("id", DataTypes.INT())
              .field("name", DataTypes.STRING()).field("op_time", DataTypes.TIMESTAMP()).build());
          sinkTableSchemas.put("product", TableSchema.builder().field("productId", DataTypes.INT())
              .field("price", DataTypes.DECIMAL(12, 6)).field("saleCount", DataTypes.INT()).build());

          for (Map.Entry<String, TableSchema> entry : sinkTableSchemas.entrySet()) {
              TableIdentifier identifier = TableIdentifier.of(Namespace.of("test_db"), entry.getKey());
              Table table = icebergHadoopCatalog.loadTable(identifier);
              TableLoader tableLoader = TableLoader.fromCatalog(catalogLoader, identifier);
              FlinkSink.forRowData(process.getSideOutput(new OutputTag<RowData>(entry.getKey()) {}))
                  .tableLoader(tableLoader)
                  .table(table)
                  .append();
          }
          env.execute("Sync MySQL to the Iceberg tables");
      }

      static class CompositeDebeziumDeserializationSchema
              implements DebeziumDeserializationSchema<RowData> {

          private final Map<String, RowDataDebeziumDeserializeSchema> deserializationSchemaMap;

          public CompositeDebeziumDeserializationSchema(
                  final Map<String, RowDataDebeziumDeserializeSchema> deserializationSchemaMap) {
              this.deserializationSchemaMap = deserializationSchemaMap;
          }

          @Override
          public void deserialize(final SourceRecord record, final Collector<RowData> out)
                  throws Exception {
              final Struct value = (Struct) record.value();
              final Struct source = value.getStruct("source");
              final String db = source.getString("db");
              final String table = source.getString("table");
              if (deserializationSchemaMap == null) {
                  throw new IllegalStateException("deserializationSchemaMap can not be null!");
              }
              // dispatch the record to the deserializer registered for "db.table"
              deserializationSchemaMap.get(db + "." + table).deserialize(record, out);
          }

          @Override
          public TypeInformation<RowData> getProducedType() {
              return TypeInformation.of(RowData.class);
          }
      }

      static class SplitCdcStreamFunction extends ProcessFunction<RowData, Void> {

          private final Map<String, RowRowConverter> converters;

          public SplitCdcStreamFunction(final Map<String, RowRowConverter> converterMap) {
              this.converters = converterMap;
          }

          @Override
          public void processElement(final RowData rowData,
                  final ProcessFunction<RowData, Void>.Context ctx, final Collector<Void> out)
                  throws Exception {
              // The JoinedRowData looks like +I{row1=+I(1,2.340000,3), row2=+I(product,test_db)},
              // so rowData.getArity() - 2 is the index of the table-name metadata field.
              final String tableName = rowData.getString(rowData.getArity() - 2).toString();
              ctx.output(new OutputTag<RowData>(tableName) {},
                  getField(JoinedRowData.class, (JoinedRowData) rowData, "row1"));
          }

          private static <O, V> V getField(Class<O> clazz, O obj, String fieldName) {
              try {
                  java.lang.reflect.Field field = clazz.getDeclaredField(fieldName);
                  field.setAccessible(true);
                  Object v = field.get(obj);
                  return v == null ? null : (V) v;
              } catch (NoSuchFieldException | IllegalAccessException e) {
                  throw new RuntimeException(e);
              }
          }
      }

      private static List<Tuple2<ObjectPath, ResolvedCatalogTable>> initSourceTables() {
          List<Tuple2<ObjectPath, ResolvedCatalogTable>> pathAndTable = new ArrayList<>();
          // build table "user"
          Schema userSchema = Schema.newBuilder()
              .column("id", DataTypes.INT().notNull())
              .column("name", DataTypes.STRING())
              .column("op_time", DataTypes.TIMESTAMP())
              .primaryKey("id")
              .build();
          List<Column> userTableCols = Stream.of(
              Column.physical("id", DataTypes.INT().notNull()),
              Column.physical("name", DataTypes.STRING()),
              Column.physical("op_time", DataTypes.TIMESTAMP())).collect(Collectors.toList());
          Schema.UnresolvedPrimaryKey userPrimaryKey = userSchema.getPrimaryKey()
              .orElseThrow(() -> new RuntimeException("table user requires a primary key"));
          ResolvedSchema userResolvedSchema = new ResolvedSchema(userTableCols, Collections.emptyList(),
              UniqueConstraint.primaryKey(userPrimaryKey.getConstraintName(), userPrimaryKey.getColumnNames()));
          ResolvedCatalogTable userTable = new ResolvedCatalogTable(
              CatalogTable.of(userSchema, "", Collections.emptyList(), new HashMap<>()), userResolvedSchema);
          pathAndTable.add(Tuple2.of(new ObjectPath("test_db", "user"), userTable));
          // build table "product"
          Schema productSchema = Schema.newBuilder()
              .column("productId", DataTypes.INT().notNull())
              .column("price", DataTypes.DECIMAL(12, 6))
              .column("saleCount", DataTypes.INT())
              .primaryKey("productId")
              .build();
          List<Column> productTableCols = Stream.of(
              Column.physical("productId", DataTypes.INT().notNull()),
              Column.physical("price", DataTypes.DECIMAL(12, 6)),
              Column.physical("saleCount", DataTypes.INT())).collect(Collectors.toList());
          Schema.UnresolvedPrimaryKey productPrimaryKey = productSchema.getPrimaryKey()
              .orElseThrow(() -> new RuntimeException("table product requires a primary key"));
          ResolvedSchema productResolvedSchema = new ResolvedSchema(productTableCols, Collections.emptyList(),
              UniqueConstraint.primaryKey(productPrimaryKey.getConstraintName(), productPrimaryKey.getColumnNames()));
          ResolvedCatalogTable productTable = new ResolvedCatalogTable(
              CatalogTable.of(productSchema, "", Collections.emptyList(), new HashMap<>()), productResolvedSchema);
          pathAndTable.add(Tuple2.of(new ObjectPath("test_db", "product"), productTable));
          return pathAndTable;
      }

      private static Map<String, RowDataDebeziumDeserializeSchema> getDebeziumDeserializeSchemas(
              final List<Tuple2<ObjectPath, ResolvedCatalogTable>> pathAndTable) {
          return pathAndTable.stream()
              .collect(toMap(e -> e.f0.toString(), e -> RowDataDebeziumDeserializeSchema.newBuilder()
                  .setPhysicalRowType(
                      (RowType) e.f1.getResolvedSchema().toPhysicalRowDataType().getLogicalType())
                  .setUserDefinedConverterFactory(MySqlDeserializationConverterFactory.instance())
                  // append table name and database name as trailing metadata columns
                  .setMetadataConverters(
                      new MetadataConverter[] {TABLE_NAME.getConverter(), DATABASE_NAME.getConverter()})
                  .setResultTypeInfo(TypeInformation.of(RowData.class)).build()));
      }
  }
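
The loop above assumes that the target Iceberg tables test_db.user and test_db.product already exist in the Hadoop catalog. If they do not, they can be created up front through the Iceberg catalog API, for example inside main() right after loading the catalog. Below is a minimal sketch for the user table, assuming an extra import of org.apache.iceberg.types.Types (the field IDs and nullability are assumptions that mirror the Flink schema above; org.apache.iceberg.Schema is written out in full because Flink's Schema is already imported):

  // Create the target "user" table if it is not there yet (sketch only).
  TableIdentifier userId = TableIdentifier.of(Namespace.of("test_db"), "user");
  if (!icebergHadoopCatalog.tableExists(userId)) {
      org.apache.iceberg.Schema userIcebergSchema = new org.apache.iceberg.Schema(
          Types.NestedField.required(1, "id", Types.IntegerType.get()),
          Types.NestedField.optional(2, "name", Types.StringType.get()),
          Types.NestedField.optional(3, "op_time", Types.TimestampType.withoutZone()));
      icebergHadoopCatalog.createTable(userId, userIcebergSchema);
  }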

Mixed-Iceberg format

The following example shows how to write CDC data from multiple MySQL tables into the corresponding Mixed-Iceberg tables.

Requirements

Please add Flink Connector MySQL CDC and Amoro dependencies to your Maven project’s pom.xml file.

  import com.netease.arctic.flink.InternalCatalogBuilder;
  import com.netease.arctic.flink.table.AmoroTableLoader;
  import com.netease.arctic.flink.util.AmoroUtils;
  import com.netease.arctic.flink.write.FlinkSink;
  import com.netease.arctic.table.TableIdentifier;
  import com.ververica.cdc.connectors.mysql.source.MySqlSource;
  import com.ververica.cdc.connectors.mysql.table.MySqlDeserializationConverterFactory;
  import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
  import com.ververica.cdc.debezium.table.MetadataConverter;
  import com.ververica.cdc.debezium.table.RowDataDebeziumDeserializeSchema;
  import org.apache.flink.api.common.eventtime.WatermarkStrategy;
  import org.apache.flink.api.common.typeinfo.TypeInformation;
  import org.apache.flink.api.java.tuple.Tuple2;
  import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
  import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
  import org.apache.flink.streaming.api.functions.ProcessFunction;
  import org.apache.flink.table.api.*;
  import org.apache.flink.table.catalog.*;
  import org.apache.flink.table.data.RowData;
  import org.apache.flink.table.data.conversion.RowRowConverter;
  import org.apache.flink.table.data.utils.JoinedRowData;
  import org.apache.flink.table.types.logical.RowType;
  import org.apache.flink.util.Collector;
  import org.apache.flink.util.OutputTag;
  import org.apache.kafka.connect.data.Struct;
  import org.apache.kafka.connect.source.SourceRecord;

  import java.util.*;
  import java.util.stream.Collectors;
  import java.util.stream.Stream;

  import static com.ververica.cdc.connectors.mysql.table.MySqlReadableMetadata.DATABASE_NAME;
  import static com.ververica.cdc.connectors.mysql.table.MySqlReadableMetadata.TABLE_NAME;
  import static java.util.stream.Collectors.toMap;

  public class MySqlCDC2AmoroExample {

      public static void main(String[] args) throws Exception {
          List<Tuple2<ObjectPath, ResolvedCatalogTable>> pathAndTable = initSourceTables();
          Map<String, RowDataDebeziumDeserializeSchema> debeziumDeserializeSchemas =
              getDebeziumDeserializeSchemas(pathAndTable);
          MySqlSource<RowData> mySqlSource = MySqlSource.<RowData>builder()
              .hostname("yourHostname")
              .port(yourPort)
              .databaseList("test_db")
              // set up the tables to be captured
              .tableList("test_db.user", "test_db.product")
              .username("yourUsername")
              .password("yourPassword")
              .deserializer(new CompositeDebeziumDeserializationSchema(debeziumDeserializeSchemas))
              .build();

          StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
          // enable checkpointing; the sink commits data when a checkpoint completes
          env.enableCheckpointing(60000);

          // split the CDC stream into side outputs keyed by table name
          SingleOutputStreamOperator<Void> process = env
              .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
              .setParallelism(4)
              .process(new SplitCdcStreamFunction(pathAndTable.stream()
                  .collect(toMap(e -> e.f0.toString(),
                      e -> RowRowConverter.create(e.f1.getResolvedSchema().toPhysicalRowDataType())))))
              .name("split stream");

          // create an Amoro sink per table and write the CDC data
          InternalCatalogBuilder catalogBuilder = InternalCatalogBuilder.builder()
              .metastoreUrl("thrift://<ip>:<port>/<catalog_name_in_metastore>");
          Map<String, TableSchema> sinkTableSchemas = new HashMap<>();
          sinkTableSchemas.put("user", TableSchema.builder().field("id", DataTypes.INT())
              .field("name", DataTypes.STRING()).field("op_time", DataTypes.TIMESTAMP()).build());
          sinkTableSchemas.put("product", TableSchema.builder().field("productId", DataTypes.INT())
              .field("price", DataTypes.DECIMAL(12, 6)).field("saleCount", DataTypes.INT()).build());

          for (Map.Entry<String, TableSchema> entry : sinkTableSchemas.entrySet()) {
              TableIdentifier tableId =
                  TableIdentifier.of("yourCatalogName", "yourDatabaseName", entry.getKey());
              AmoroTableLoader tableLoader = AmoroTableLoader.of(tableId, catalogBuilder);
              FlinkSink.forRowData(process.getSideOutput(new OutputTag<RowData>(entry.getKey()) {}))
                  .flinkSchema(entry.getValue())
                  .table(AmoroUtils.loadAmoroTable(tableLoader))
                  .tableLoader(tableLoader)
                  .build();
          }
          env.execute("Sync MySQL to the Mixed-Iceberg tables");
      }

      static class CompositeDebeziumDeserializationSchema
              implements DebeziumDeserializationSchema<RowData> {

          private final Map<String, RowDataDebeziumDeserializeSchema> deserializationSchemaMap;

          public CompositeDebeziumDeserializationSchema(
                  final Map<String, RowDataDebeziumDeserializeSchema> deserializationSchemaMap) {
              this.deserializationSchemaMap = deserializationSchemaMap;
          }

          @Override
          public void deserialize(final SourceRecord record, final Collector<RowData> out)
                  throws Exception {
              final Struct value = (Struct) record.value();
              final Struct source = value.getStruct("source");
              final String db = source.getString("db");
              final String table = source.getString("table");
              if (deserializationSchemaMap == null) {
                  throw new IllegalStateException("deserializationSchemaMap can not be null!");
              }
              // dispatch the record to the deserializer registered for "db.table"
              deserializationSchemaMap.get(db + "." + table).deserialize(record, out);
          }

          @Override
          public TypeInformation<RowData> getProducedType() {
              return TypeInformation.of(RowData.class);
          }
      }

      static class SplitCdcStreamFunction extends ProcessFunction<RowData, Void> {

          private final Map<String, RowRowConverter> converters;

          public SplitCdcStreamFunction(final Map<String, RowRowConverter> converterMap) {
              this.converters = converterMap;
          }

          @Override
          public void processElement(final RowData rowData,
                  final ProcessFunction<RowData, Void>.Context ctx, final Collector<Void> out)
                  throws Exception {
              // The JoinedRowData looks like +I{row1=+I(1,2.340000,3), row2=+I(product,test_db)},
              // so rowData.getArity() - 2 is the index of the table-name metadata field.
              final String tableName = rowData.getString(rowData.getArity() - 2).toString();
              ctx.output(new OutputTag<RowData>(tableName) {},
                  getField(JoinedRowData.class, (JoinedRowData) rowData, "row1"));
          }

          private static <O, V> V getField(Class<O> clazz, O obj, String fieldName) {
              try {
                  java.lang.reflect.Field field = clazz.getDeclaredField(fieldName);
                  field.setAccessible(true);
                  Object v = field.get(obj);
                  return v == null ? null : (V) v;
              } catch (NoSuchFieldException | IllegalAccessException e) {
                  throw new RuntimeException(e);
              }
          }
      }

      private static List<Tuple2<ObjectPath, ResolvedCatalogTable>> initSourceTables() {
          List<Tuple2<ObjectPath, ResolvedCatalogTable>> pathAndTable = new ArrayList<>();
          // build table "user"
          Schema userSchema = Schema.newBuilder()
              .column("id", DataTypes.INT().notNull())
              .column("name", DataTypes.STRING())
              .column("op_time", DataTypes.TIMESTAMP())
              .primaryKey("id")
              .build();
          List<Column> userTableCols = Stream.of(
              Column.physical("id", DataTypes.INT().notNull()),
              Column.physical("name", DataTypes.STRING()),
              Column.physical("op_time", DataTypes.TIMESTAMP())).collect(Collectors.toList());
          Schema.UnresolvedPrimaryKey userPrimaryKey = userSchema.getPrimaryKey()
              .orElseThrow(() -> new RuntimeException("table user requires a primary key"));
          ResolvedSchema userResolvedSchema = new ResolvedSchema(userTableCols, Collections.emptyList(),
              UniqueConstraint.primaryKey(userPrimaryKey.getConstraintName(), userPrimaryKey.getColumnNames()));
          ResolvedCatalogTable userTable = new ResolvedCatalogTable(
              CatalogTable.of(userSchema, "", Collections.emptyList(), new HashMap<>()), userResolvedSchema);
          pathAndTable.add(Tuple2.of(new ObjectPath("test_db", "user"), userTable));
          // build table "product"
          Schema productSchema = Schema.newBuilder()
              .column("productId", DataTypes.INT().notNull())
              .column("price", DataTypes.DECIMAL(12, 6))
              .column("saleCount", DataTypes.INT())
              .primaryKey("productId")
              .build();
          List<Column> productTableCols = Stream.of(
              Column.physical("productId", DataTypes.INT().notNull()),
              Column.physical("price", DataTypes.DECIMAL(12, 6)),
              Column.physical("saleCount", DataTypes.INT())).collect(Collectors.toList());
          Schema.UnresolvedPrimaryKey productPrimaryKey = productSchema.getPrimaryKey()
              .orElseThrow(() -> new RuntimeException("table product requires a primary key"));
          ResolvedSchema productResolvedSchema = new ResolvedSchema(productTableCols, Collections.emptyList(),
              UniqueConstraint.primaryKey(productPrimaryKey.getConstraintName(), productPrimaryKey.getColumnNames()));
          ResolvedCatalogTable productTable = new ResolvedCatalogTable(
              CatalogTable.of(productSchema, "", Collections.emptyList(), new HashMap<>()), productResolvedSchema);
          pathAndTable.add(Tuple2.of(new ObjectPath("test_db", "product"), productTable));
          return pathAndTable;
      }

      private static Map<String, RowDataDebeziumDeserializeSchema> getDebeziumDeserializeSchemas(
              final List<Tuple2<ObjectPath, ResolvedCatalogTable>> pathAndTable) {
          return pathAndTable.stream()
              .collect(toMap(e -> e.f0.toString(), e -> RowDataDebeziumDeserializeSchema.newBuilder()
                  .setPhysicalRowType(
                      (RowType) e.f1.getResolvedSchema().toPhysicalRowDataType().getLogicalType())
                  .setUserDefinedConverterFactory(MySqlDeserializationConverterFactory.instance())
                  // append table name and database name as trailing metadata columns
                  .setMetadataConverters(
                      new MetadataConverter[] {TABLE_NAME.getConverter(), DATABASE_NAME.getConverter()})
                  .setResultTypeInfo(TypeInformation.of(RowData.class)).build()));
      }
  }
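
Both DataStream examples above rely on checkpointing to commit data to the target tables, so for anything beyond a local test you will usually also want the checkpoints written to durable storage so the job can recover after a failure. A minimal sketch (the HDFS path is only an assumption; adjust it to your environment):

  // Persist checkpoints to durable storage so committed state survives restarts
  // (the checkpoint directory below is just an example).
  env.getCheckpointConfig().setCheckpointStorage("hdfs://nn:8020/flink/checkpoints");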