Receiving Kafka data in real time with PyFlink and writing it to a print sink

This post walks through a minimal PyFlink Table API job: create a TableEnvironment, define a Kafka source table and a print sink table, then continuously copy rows from the source into the sink.
#!/usr/bin/python
# -*- coding: UTF-8 -*-

from pyflink.table import EnvironmentSettings, TableEnvironment

# 1. Create a TableEnvironment
# (on Flink 1.14+ the blink planner is the default, so
#  EnvironmentSettings.in_streaming_mode() alone is enough there)
env_settings = EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
table_env = TableEnvironment.create(env_settings)
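The Kafka connector is not bundled with PyFlink, so the job needs the connector JAR on its classpath before the DDL below can run. A minimal sketch, assuming a locally downloaded flink-sql-connector-kafka JAR (the path and version in the string are placeholders, not from the original post):

# Register the Kafka SQL connector JAR with the job; path/version are placeholders.
table_env.get_config().get_configuration().set_string(
    "pipeline.jars",
    "file:///path/to/flink-sql-connector-kafka_2.11-1.13.6.jar"
)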

# 2. Create the Kafka source table
table_env.execute_sql("""
    CREATE TABLE datagen (
        id INT,
        name VARCHAR
    ) WITH (
        'connector' = 'kafka',
        'topic' = 'flink_test10',
        'properties.bootstrap.servers' = '10.*.*.**:9092',
        'properties.group.id' = 'test_Print',
        'scan.startup.mode' = 'latest-offset',
        'format' = 'json'
    )
""")

# 3. Create the print sink table
table_env.execute_sql("""
    CREATE TABLE print (
        id INT,
        name VARCHAR
    ) WITH (
        'connector' = 'print'
    )
""")

# 4. Query the source table (this is where any computation would go)
# Create a Table via the Table API:
source_table = table_env.from_path("datagen")
# Or create it via a SQL query:
#source_table = table_env.sql_query("SELECT * FROM datagen")

result_table = source_table.select(source_table.id, source_table.name)
print("result table:", type(result_table))
# to_pandas() would try to collect the whole stream and never return on an
# unbounded Kafka source, so it stays commented out:
#print("r data: ", result_table.to_pandas())

# 5. Write the result into the sink table
# Via the Table API:
result_table.execute_insert("print").wait()
# Or via a SQL INSERT statement:
#table_env.execute_sql("INSERT INTO print SELECT * FROM datagen").wait()
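Once the job is running and a message such as {"id": 1, "name": "alice"} arrives on the topic, the print connector writes each row to the task's standard output. The exact prefix depends on Flink version and parallelism, but the output looks roughly like:

1> +I[1, alice]

Here "1>" is the subtask index and "+I" marks an insert-only row.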

