
Error thrown when running the WordCount example on Hadoop

I imagine everyone runs Hadoop's "hello world", the WordCount program, with an excited heart, only to wait quite a while and then see a wall of red text in the console. Anyone who has spent time in Eclipse will find it all too familiar: the error output we love and hate. We love it because it finally tells us where things went wrong, and we hate it through gritted teeth because it means our program is broken. Here is the exception that was thrown:

Exception in thread "main" java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)Z

After searching around, I finally tracked down the cause: the exception is thrown from NativeIO.java. Now for the fix. First locate NativeIO.java: download and unpack the hadoop-2.x.x-src.tar.gz source archive, and you will find the file at hadoop-2.x.x-src\hadoop-common-project\hadoop-common\src\main\java\org\apache\hadoop\io\nativeio\NativeIO.java. Edit line 557 so that the method's return value becomes return true, then copy the modified NativeIO.java into your project (under the same org.apache.hadoop.io.nativeio package), run the job again, and everything works.
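
For reference, this is roughly what the patched method looks like in the copied NativeIO.java; the signature is taken from the Hadoop 2.x sources, and the exact line number can differ slightly between 2.x releases:

// Inside the Windows inner class of NativeIO.java (around line 557 in Hadoop 2.x).
// The original body delegates to the native access0 call, which is what throws the
// UnsatisfiedLinkError when the Windows native library cannot be loaded; the
// workaround simply returns true instead of calling into native code.
public static boolean access(String path, AccessRight desiredAccess)
		throws IOException {
	// return access0(path, desiredAccess.accessRight());  // original line
	return true;                                            // patched return value
}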

Finally, here is the full source code:

package com.pqb;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;


/**
 * 
 * @author pqb
 * Object: the raw content of the input data (the input key)
 * Text: the text data of each input line
 * Text: each word produced for counting
 * IntWritable: the count written for each output record
 *
 */
public class WordCount {
	public static class WordCountMapper extends Mapper<Object, Text, Text, IntWritable> {
		@Override
		protected void map(Object key, Text value, Mapper<Object, Text, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			String lineContent = value.toString();      // text of the current line
			String[] result = lineContent.split(" ");   // split the line into words
			for (int i = 0; i < result.length; i++) {
				context.write(new Text(result[i]), new IntWritable(1));  // emit <word, 1>
			}
		}
	}
	/**
	 * 
	 * @author pqb
	 * Text: the words output by the map phase
	 * IntWritable: the counts output by the map phase
	 */
	public static class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		@Override
		protected void reduce(Text key, Iterable<IntWritable> values, Reducer<Text, IntWritable, Text, IntWritable>.Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable count : values) {
				sum += count.get();                      // add up every count for this word
			}
			context.write(key, new IntWritable(sum));    // emit <word, total count>
		}
	}
	public static void main(String[] args) throws Exception {
		//System.out.println(args[0] + "\n" + args[1]);
		/*if (args.length != 2) {
			System.err.println("Invalid input arguments");
			System.exit(1);
		}*/
		Configuration conf = new Configuration();        // job configuration
		
		//String[] argArray = new GenericOptionsParser(conf, args).getRemainingArgs();
		
		Job job = Job.getInstance(conf, "hadoop");       // define a Hadoop job
		job.setJarByClass(WordCount.class);              // class used to locate the job jar
		job.setMapperClass(WordCountMapper.class);       // mapper class
		job.setCombinerClass(WordCountReducer.class);    // combiner (reuses the reducer)
		job.setMapOutputKeyClass(Text.class);            // map output key type
		job.setMapOutputValueClass(IntWritable.class);   // map output value type
		
		job.setReducerClass(WordCountReducer.class);     // reducer class
		
		job.setOutputKeyClass(Text.class);               // final output key type (text)
		job.setOutputValueClass(IntWritable.class);      // final output value type
		
		FileInputFormat.addInputPath(job, new Path("hdfs://192.168.141.128:9000/input01/a.txt"));
		FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.141.128:9000/output"));
		
		System.exit(job.waitForCompletion(true)?0:1);
	}
	
}
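
As a side note, the commented-out args check and the GenericOptionsParser line hint at a version that does not hardcode the HDFS paths. Below is a minimal sketch of that variant of main, assuming the input and output paths are passed as the first two command-line arguments (it relies only on the imports already present in the class above):

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// Let GenericOptionsParser consume generic Hadoop options, then treat the
		// remaining arguments as <input path> <output path>.
		String[] remaining = new GenericOptionsParser(conf, args).getRemainingArgs();
		if (remaining.length != 2) {
			System.err.println("Usage: WordCount <input path> <output path>");
			System.exit(1);
		}
		Job job = Job.getInstance(conf, "hadoop");
		job.setJarByClass(WordCount.class);
		job.setMapperClass(WordCountMapper.class);
		job.setCombinerClass(WordCountReducer.class);    // reuse the reducer as a combiner
		job.setReducerClass(WordCountReducer.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(remaining[0]));
		FileOutputFormat.setOutputPath(job, new Path(remaining[1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}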
           

If anything is unclear, feel free to contact me on QQ: 2229176653.
