The complete WordCount example (Test.java):

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Test {

    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        // Split each input line on whitespace and emit (token, 1) per token.
        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        // Sum the counts collected for one word and emit (word, total).
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        // Configuration for the job: load the cluster's site files explicitly
        // so the program also finds HDFS when run outside "hadoop jar".
        Configuration conf = new Configuration();
        conf.addResource(new Path("/usr/local/hadoop/etc/hadoop/core-site.xml"));
        conf.addResource(new Path("/usr/local/hadoop/etc/hadoop/hdfs-site.xml"));

        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(Test.class);

        // Set the mapper, combiner, and reducer. The reducer doubles as the
        // combiner because summing partial counts is associative.
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);

        // The output key is of type Text, the output value of type IntWritable.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // Default to the fixed HDFS test paths when none are given on the command line.
        if (otherArgs.length == 0) {
            otherArgs = new String[] { "/input", "/output" };
        }
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
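To make the data flow concrete, suppose /input holds a file with two lines (the sample text is an illustrative assumption, not from the listing itself):

hello world
hello hadoop

The mapper emits (hello,1), (world,1), (hello,1), (hadoop,1); the combiner may pre-sum duplicate keys on the map side; the shuffle groups the remaining values by key; and the reducer writes one tab-separated total per word:

hadoop  1
hello   2
world   1

Note that StringTokenizer splits on whitespace only, so punctuation stays attached: "world" and "world!" would be counted as two distinct keys.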
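Assuming the class is packaged into a jar named wordcount.jar (the name here is arbitrary), the job can be launched with: hadoop jar wordcount.jar Test /input /output. One practical caveat: the output directory must not already exist, or FileOutputFormat rejects the job at submission with a FileAlreadyExistsException.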
Original post: http://www.cnblogs.com/rambot/p/3622130.html