1. Import MySQL data into HDFS
mysql> create database sqoop;
Query OK, 1 row affected (0.00 sec)
mysql> use sqoop;
Database changed
mysql> create table stu(sid bigint primary key auto_increment,sname varchar(80));
Query OK, 0 rows affected (0.01 sec)
mysql> insert into stu values(null,'zhangsan');
Query OK, 1 row affected (0.00 sec)
mysql> insert into stu values(null,'lisi');
Query OK, 1 row affected (0.00 sec)
mysql> grant all privileges on *.* to 'root'@'%' identified by '123456';
Query OK, 0 rows affected, 1 warning (0.01 sec)
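Before running the Sqoop job it can help to confirm that the Hadoop node can actually reach this MySQL instance with the account granted above. A minimal check, assuming the mysql client is installed on that node (host and credentials as in this example):

mysql -h 192.168.67.1 -P 3306 -uroot -p123456 -e "select * from sqoop.stu;"
# should return the two rows inserted above: (1, zhangsan) and (2, lisi)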
sqoop import \
  --connect jdbc:mysql://192.168.67.1:3306/sqoop \
  --username root --password 123456 \
  --table stu \
  -m 1 --split-by sid \
  --fields-terminated-by '#' \
  --target-dir '/usr/local/soft/hive-3.1.2/data/td2' \
  --delete-target-dir
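After the job finishes, the result can be inspected on HDFS. With a single mapper (-m 1), Sqoop typically writes one file named part-m-00000 under the target directory; the exact file name may differ, so list the directory first:

hdfs dfs -ls /usr/local/soft/hive-3.1.2/data/td2
hdfs dfs -cat /usr/local/soft/hive-3.1.2/data/td2/part-m-00000
# expected content, given the '#' field delimiter and the rows inserted above:
# 1#zhangsan
# 2#lisi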
2. Import MySQL data into Hive
# start the Hive metastore service in the background (logs go to metastore.log)
nohup hive --service metastore >> metastore.log 2>&1 &
# open the Hive CLI and create the target table; the tab delimiter matches the Sqoop import below
hive
create table stu(sid bigint,sname string) row format delimited fields terminated by '\t';
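Before launching the Sqoop Hive import, it is worth confirming from another shell that the metastore service actually came up (it listens on port 9083 by default). A rough check, assuming jps and netstat are available on the node:

jps                        # the metastore appears as a RunJar process
netstat -tlnp | grep 9083  # default Hive metastore port
tail metastore.log         # check for errors if the port is not open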
sqoop import \
  --connect jdbc:mysql://192.168.67.1:3306/sqoop \
  --username root --password 123456 \
  --table stu \
  --fields-terminated-by "\t" --lines-terminated-by "\n" \
  --hive-import --hive-overwrite --hive-table default.stu \
  --delete-target-dir
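Once the import completes, the rows should be visible in the Hive table (a quick check):

hive -e "select * from default.stu;"
# expected: the same rows as in MySQL, e.g. 1 zhangsan and 2 lisi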
3. Export Hive data to MySQL
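Note that the MySQL table sqoop.stu still holds the rows from step 1 and sid is a primary key; sqoop export issues plain INSERT statements by default, so exporting the same rows back into it will fail with duplicate-key errors. One option (an assumption about the intended workflow, not part of the original steps) is to empty the target table first, or export into a separate empty table with the same schema:

mysql> truncate table sqoop.stu;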
sqoop export \
  --connect jdbc:mysql://192.168.67.1:3306/sqoop \
  --username root --password 123456 \
  --table stu \
  -m 1 \
  --export-dir '/user/hive/warehouse/stu' \
  --fields-terminated-by '\t'
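Finally, the exported rows can be verified on the MySQL side:

mysql> select * from sqoop.stu;
-- should show the rows loaded back from the Hive warehouse directory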