Flink CDC in Practice
SQLServer
1. The db history topic or its content is fully or partially missing. Please check database history topic configuration and re-execute the snapshot.
Ran into the following error. After several attempts it turned out to be a database-name case mismatch: the case used in the Flink DDL must be identical to the actual SQL Server database name.
Caused by: io.debezium.DebeziumException: The db history topic or its content is fully or partially missing. Please check database history topic configuration and re-execute the snapshot.
at io.debezium.relational.HistorizedRelationalDatabaseSchema.recover(HistorizedRelationalDatabaseSchema.java:59) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at io.debezium.schema.HistorizedDatabaseSchema.recover(HistorizedDatabaseSchema.java:38) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.cdc.connectors.sqlserver.source.reader.fetch.SqlServerSourceFetchTaskContext.validateAndLoadDatabaseHistory(SqlServerSourceFetchTaskContext.java:187) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.cdc.connectors.sqlserver.source.reader.fetch.SqlServerSourceFetchTaskContext.configure(SqlServerSourceFetchTaskContext.java:130) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.cdc.connectors.base.source.reader.external.IncrementalSourceStreamFetcher.submitTask(IncrementalSourceStreamFetcher.java:84) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.cdc.connectors.base.source.reader.IncrementalSourceSplitReader.submitStreamSplit(IncrementalSourceSplitReader.java:261) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.cdc.connectors.base.source.reader.IncrementalSourceSplitReader.pollSplitRecords(IncrementalSourceSplitReader.java:153) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.cdc.connectors.base.source.reader.IncrementalSourceSplitReader.fetch(IncrementalSourceSplitReader.java:98) ~[flink-sql-connector-sqlserver-cdc-3.2.0.jar:3.2.0]
at org.apache.flink.connector.base.source.reader.fetcher.FetchTask.run(FetchTask.java:58) ~[flink-connector-files-1.20.0.jar:1.20.0]
at org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.runOnce(SplitFetcher.java:165) ~[flink-connector-files-1.20.0.jar:1.20.0]
at org.apache.flink.connector.base.source.reader.fetcher.SplitFetcher.run(SplitFetcher.java:117) ~[flink-connector-files-1.20.0.jar:1.20.0]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515) ~[?:?]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128) ~[?:?]
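With the case corrected ('database-name' = 'CrmExtend' matches the actual database name exactly), the source table is declared as follows: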
CREATE TABLE Member_Extend (
    ID INT,
    MemberID INT,
    PRIMARY KEY (ID) NOT ENFORCED
) WITH (
    'connector' = 'sqlserver-cdc',
    'hostname' = '192.168.1.3',
    'port' = '1433',
    'username' = 'test',
    'password' = 'test',
    'database-name' = 'CrmExtend',
    'table-name' = 'dbo.Member_Extend'
);
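To check that the connector is actually receiving changes, one option is to route the stream into a print sink (a minimal sketch; the sink table name Member_Extend_Print is just illustrative):

-- print writes each row, together with its change flag (+I/-U/+U/-D), to the TaskManager's stdout
CREATE TABLE Member_Extend_Print (
    ID INT,
    MemberID INT
) WITH (
    'connector' = 'print'
);

INSERT INTO Member_Extend_Print SELECT ID, MemberID FROM Member_Extend;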
Safely stopping and restarting a job
show jobs
Flink SQL> show jobs;
+----------------------------------+----------+---------+-------------------------+
|                           job id | job name |  status |              start time |
+----------------------------------+----------+---------+-------------------------+
| ce5a0e938563cf52317c5b9055ad102f |  testjob | RUNNING | 2024-11-15T03:38:47.919 |
+----------------------------------+----------+---------+-------------------------+
1 row in set
SET 'state.checkpoints.dir' = 's3://flink/cdc-1.20/savepoints';
stop job 'ce5a0e938563cf52317c5b9055ad102f' with savepoint;
Flink SQL> stop job 'ce5a0e938563cf52317c5b9055ad102f' with savepoint;
+--------------------------------------------------------------+
|                                                savepoint path |
+--------------------------------------------------------------+
| s3://flink/cdc-1.20/savepoints/savepoint-ce5a0e-2935055bb307 |
+--------------------------------------------------------------+
1 row in set
SET 'execution.savepoint.path' = 's3://flink/cdc-1.20/savepoints/savepoint-ce5a0e-2935055bb307';
SET 'execution.savepoint.ignore-unclaimed-state' = 'true';
Re-run the original SQL:
insert into flink_user select * from `user`;
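Once the job has been resubmitted from the savepoint, the restore path can be cleared so that statements submitted later in the same SQL Client session do not also try to start from it (a small follow-up step, assuming the session is kept open):

RESET 'execution.savepoint.path';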