Writing a first MapReduce program for Hadoop 3.3.6
Hadoop is still the pseudo-distributed setup from the previous article, installed on Anolis OS (龙蜥) 8.9; development is done on Windows.
1. On Windows, first download the Hadoop distribution hadoop-3.3.6.tar.gz and extract it, for example to d:\java\hadoop-3.3.6.
Set the environment variable HADOOP_HOME to D:\java\hadoop-3.3.6.
2. From https://github.com/cdarlint/winutils/tree/master/hadoop-3.3.6/bin download:
hadoop.dll and winutils.exe
and put them into d:\java\hadoop-3.3.6\bin.
Also add D:\java\hadoop-3.3.6\bin to PATH.
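Optionally, you can confirm that the JVM actually sees this setup before going further. A minimal sanity check (the class name WinutilsCheck is only an illustration, not part of the project below):

import java.io.File;

public class WinutilsCheck {
    public static void main(String[] args) {
        // HADOOP_HOME must point at the extracted distribution
        String home = System.getenv("HADOOP_HOME");
        System.out.println("HADOOP_HOME = " + home);
        if (home != null) {
            // winutils.exe and hadoop.dll must sit in %HADOOP_HOME%\bin
            System.out.println("winutils.exe found: " + new File(home, "bin\\winutils.exe").exists());
            System.out.println("hadoop.dll found: " + new File(home, "bin\\hadoop.dll").exists());
        }
    }
}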
3. Create a Maven project.
The pom.xml content:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.rainpet</groupId>
  <artifactId>MapReduceDemo</artifactId>
  <version>0.0.1-SNAPSHOT</version>
  <name>hadoop-test</name>
  <url>http://maven.apache.org</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <hadoop.version>3.3.6</hadoop.version>
    <!-- Logger -->
    <log4j2.version>2.12.1</log4j2.version>
  </properties>

  <dependencies>
    <dependency>
      <!-- The client version should ideally match the cluster version -->
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-client</artifactId>
      <version>${hadoop.version}</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.12</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs-client</artifactId>
      <version>${hadoop.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-yarn-client</artifactId>
      <version>${hadoop.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>${hadoop.version}</version>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
      <version>1.7.36</version>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.2.4</version>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <transformers>
                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                  <mainClass>com.rainpet.MapReduceDemo.WordCountDriver</mainClass>
                </transformer>
              </transformers>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</project>
4. Create the package and file com.rainpet.MapReduceDemo.WordCountDriver.java
with the following content:
package com.rainpet.MapReduceDemo;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/*
 * Driver code for the WordCount example:
 * 1. Get the job
 * 2. Set the jar path
 * 3. Wire up the Mapper and Reducer
 * 4. Set the map output key/value types
 * 5. Set the final output key/value types
 * 6. Set the input path
 * 7. Set the output path
 * 8. Submit the job
 */
public class WordCountDriver {
    public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
        // 1. Get the job
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://master:8020");
        //conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        Job job = Job.getInstance(conf);
        // 2. Set the jar path
        job.setJarByClass(WordCountDriver.class);
        // 3. Wire up the Mapper and Reducer
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input path
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // 7. Set the output path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 8. Submit the job and exit with its status
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
5. Create the file:
com.rainpet.MapReduceDemo.WordCountMapper.java
with the following content:
package com.rainpet.MapReduceDemo;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/*
 * Map-phase code for the WordCount example (extends Mapper):
 * KEYIN,    map input key type: LongWritable (byte offset of the line)
 * VALUEIN,  map input value type: Text (the line itself)
 * KEYOUT,   map output key type: Text
 * VALUEOUT, map output value type: IntWritable
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Reusable output key/value objects
    private final Text outK = new Text();
    private final IntWritable outV = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Convert the input line to a String
        String line = value.toString();
        // Split the line on single spaces ("\\s+" would be more robust if the input may contain tabs or repeated spaces)
        String[] words = line.split(" ");
        // Emit (word, 1) for every word in the line
        for (String word : words) {
            outK.set(word);
            context.write(outK, outV);
        }
    }
}
6. Create the file:
com.rainpet.MapReduceDemo.WordCountReducer.java
with the following content:
package com.rainpet.MapReduceDemo;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/*
 * Reduce-phase code for the WordCount example (extends Reducer):
 * KEYIN,    reduce input key type: Text
 * VALUEIN,  reduce input value type: IntWritable
 * KEYOUT,   reduce output key type: Text
 * VALUEOUT, reduce output value type: IntWritable
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    // Reusable output value object
    private final IntWritable outV = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the counts for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        outV.set(sum);
        context.write(key, outV);
    }
}
7. Set the project's JDK compliance level to 1.8,
and set the Java compiler's bytecode target to 1.8 as well.
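If you prefer to pin this in the build rather than only in the IDE project settings, one option (an addition of mine, not part of the pom.xml shown above) is to add the standard compiler properties to the <properties> section:

<!-- optional addition to <properties> in the pom.xml above -->
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>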
8. Compile and package (for example with mvn clean package), which produces the file:
MapReduceDemo-0.0.1-SNAPSHOT.jar
9. Upload the jar to the Hadoop server and run:
hadoop jar MapReduceDemo-0.0.1-SNAPSHOT.jar /user/input /user/output2
No main class needs to be given on the command line because the shade plugin wrote it into the jar manifest. Under normal circumstances this produces the results.
The input is the same data file used last time: /user/input/1.txt
10. View the results (each output line is a word and its count, separated by a tab):
hdfs dfs -cat /user/output2/*
11. Delete the output directory (needed before re-running, because the job refuses to start if the output path already exists):
hdfs dfs -rm -R /user/output2
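If you would rather not delete the directory by hand before each re-run, the same cleanup can be done in the driver. A minimal sketch (my own addition, meant to go into WordCountDriver right before FileOutputFormat.setOutputPath, with an extra import of org.apache.hadoop.fs.FileSystem):

// Delete the output path if it already exists, otherwise job submission fails
FileSystem fs = FileSystem.get(conf);
Path outputPath = new Path(args[1]);
if (fs.exists(outputPath)) {
    fs.delete(outputPath, true); // true = delete recursively
}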
12. When running from Windows, you may hit this error:
Permission denied: user=administrator, access=WRITE
The fix is to add the system environment variable HADOOP_USER_NAME=hadoop.
The original write-up of this issue:
https://blog.csdn.net/zhangjunli/article/details/106321516
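As an alternative to the environment variable, the user can also be set from code. A minimal sketch (my own variant, not from the original write-up): put this at the very top of WordCountDriver.main, before the Configuration is created, and the Hadoop client should act as that user:

// Alternative to setting the HADOOP_USER_NAME environment variable
System.setProperty("HADOOP_USER_NAME", "hadoop");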
If other problems come up and get solved, this article will be updated.