[root@hadoop104 ~]# impala-shell -i hadoop105
Starting Impala Shell without Kerberos authentication
Connected to hadoop105:21000
create table student(id int, name string)
row format delimited fields terminated by "\t";
desc formatted student;
insert into table student select ...; ##也可以通过 insert ... select 子查询的方式插入数据
insert into table student values(1001, "alex"), (1002, "kris"); ##into是往后边追加;在HDFS上是追加到一个新文件;
insert overwrite table student values (1003, "jing"), (1004, "zk"); ##它会把之前的内容给覆盖掉;
shell -e+sql语句 -f+sql脚本
[root@hadoop104 ~]# impala-shell -help
impala-shell -i hadoop105 -q 'select * from student';
vim impala.sql
select * from student; ##可以插入多条语句
impala-shell -i hadoop105 -f impala.sql
impala-shell -i hadoop105 -f impala.sql -o result.txt
impala-shell -i hadoop105 -f impala.sql -o result.txt -B
-B格式化,tab分隔符;
impala-shell -i hadoop105 -f impala.sql -o result.txt -B --print_header ##把标题头打印出来
impala-shell -i hadoop105 -f impala.sql -o result.txt -B --print_header --output_delimiter='|'; 分隔符
--verbose 默认true,打印信息;
[hadoop105:21000] > select * from student;
Query: select * from student
Query submitted at: 2019-02-26 18:33:32 (Coordinator: http://hadoop105:25000)
Query progress can be monitored at: http://hadoop105:25000/query_plan?query_id=154d54075a0ed555:f84f23ee00000000
+------+------+
| id | name |
+------+------+
| 1003 | jing |
| 1004 | zk |
| 1002 | kris |
+------+------+
impala-shell -i hadoop105 --quiet 不打印详细信息;本次有效,重启下就还是默认的;
[hadoop105:21000] > select * from student;
+------+------+
| id | name |
+------+------+
| 1002 | kris |
| 1003 | jing |
| 1004 | zk |
+------+------+
[root@hadoop104 ~]# impala-shell -i hadoop105 -v #版本
Impala Shell v2.9.0-cdh5.12.1 (5131a03) built on Thu Aug 24 09:27:32 PDT 2017
impala-shell -i hadoop105 -f impala.sql
impala-shell -i hadoop105 -f impala.sql -c #-c是忽略查询错误,直接跳过去执行其他的;错误的那行必须加;不然后边正确的也执行不了;
从hive中向student表中插入一条语句:
select * from student;
quit;
impala-shell -i hadoop105 -r ##--refresh_after_connect;所有表数据都会更新
select * from student;
-d DEFAULT_DB, --database=DEFAULT_DB
impala-shell -i hadoop105 -p, --show_profiles
select * from student;
详细底层信息; hive的执行计划explain
查看执行计划,也可以对集群进行优化 explain select * from student; #-p更详细; profile; 跟-p是一样的;直接打印出profile,它显示出上一个命令的详细信息 [hadoop105:21000] > select * from student profile; hive> dfs -ls /; Found 3 items -rw-r--r-- 3 root supergroup 346 2019-02-26 08:51 /log4j.log drwxrwxrwt - hdfs supergroup 0 2019-02-25 22:45 /tmp drwxrwxrwx - hdfs supergroup 0 2019-02-26 10:04 /user 0: jdbc:hive2://hadoop104:10000> !sh hadoop fs -ls / ##hdfs上的文件信息 Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=512M; support was removed in 8.0 Found 3 items -rw-r--r-- 3 root supergroup 346 2019-02-26 08:51 /log4j.log drwxrwxrwt - hdfs supergroup 0 2019-02-25 22:45 /tmp drwxrwxrwx - hdfs supergroup 0 2019-02-26 10:04 /user 0: jdbc:hive2://hadoop104:10000> !sh ls ./ #当前linux目录下的文件 impala.sql log4j.log result.txt student.txt 公共的 模板 视频... [hadoop105:21000] > ! ls ./; #当前linux目录下;跟linux命令是一样的; impala.sql log4j.log result.txt student.txt 公共的 模板 视频 图片 文档 下载 音乐 桌面 bleeing 0: jdbc:hive2://hadoop104:10000> help ##它的命令都是带有!的; !help !quiet !sh hadoop fs -ls / ##查看本地linux中的目录文件 !sh ls / shell跟进入内部!sh一样 [hadoop105:21000] > shell hadoop fs -ls /; ##查看hdfs上文件;查看本地直接跟linux系统的命令 version; ##版本要一致或要兼容 Impala Shell和server version要兼容 connect hadoop106;跟-i是一样的 history; set COMPRESSION_CODEC=gzip; unset COMPRESSION_CODEC;
refresh student; ##增量刷新指定表的元数据(hive中改动后需要refresh才能在impala中看到)
创建数据库
Impala不支持alter database语法 创建数据库: 不支持 with dbproperties数据库的属性,但hive支持WITH DBPROPERTIES ("creator"="ruoze", "date"="2018-08-08"); [hadoop105:21000] > create database impaladb comment "impala_db" location '/impala_db'; Query: create database impaladb comment "impala_db" location '/impala_db' location '/' 不要这样写;删除时会把根目录下的全部删掉; drop database impaladb; #正在使用的库不能删,有数据的也删不了,要加cascade;
原文:https://www.cnblogs.com/shengyang17/p/10440134.html