Hadoop error in Eclipse, asking for help

I already posted this yesterday, but I'd like to post it one more time...
The goal is to count, from Eclipse, how many times each string occurs in a file on HDFS:
It ran fine yesterday morning, but by the afternoon it stopped working, and I can't make sense of the log, so I'm here to ask the experts.
The result folder used to hold the output was created successfully in HDFS, but the output files under it were never created.

Mapper class:
package cn.test;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WCMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split each input line on single spaces and emit (word, 1) per token.
        String[] words = value.toString().split(" ");
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}

Reducer class:
package cn.test;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WCReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum all the 1s the mappers emitted for this word.
        Iterator<LongWritable> vals = values.iterator();
        long count = 0;
        while (vals.hasNext()) {
            count += vals.next().get();
        }
        context.write(key, new LongWritable(count));
    }
}
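
Small aside: since this reduce is just an associative sum, the same class could also be registered as a combiner to pre-aggregate map output before the shuffle. Purely optional; job here is the Job built in the driver below:

// Optional: run WCReducer map-side as a combiner as well.
job.setCombinerClass(WCReducer.class);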

Driver class:
package cn.test;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "wcJob");

        job.setJarByClass(WCDriver.class);
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // Input and output directories on HDFS; the output directory
        // must not exist when the job is submitted.
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.80.101:9000/111"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.80.101:9000/park/result"));

        job.waitForCompletion(true);
    }
}
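
One side note on the driver: the log below warns "Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this." A minimal sketch of the same driver restructured that way (the class name WCDriverTool is invented; the hard-coded paths are carried over from the code above):

package cn.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WCDriverTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // getConf() returns the Configuration that ToolRunner has already
        // populated from generic options such as -D key=value.
        Job job = Job.getInstance(getConf(), "wcJob");
        job.setJarByClass(WCDriverTool.class);
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.80.101:9000/111"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.80.101:9000/park/result"));
        // Surface the job result instead of discarding it.
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new WCDriverTool(), args));
    }
}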


Log:
[INFO ] 2019-05-23 11:44:53,162 method:org.apache.hadoop.conf.Configuration.warnOnceIfDeprecated(Configuration.java:1173)
session.id is deprecated. Instead, use dfs.metrics.session-id
[INFO ] 2019-05-23 11:44:53,178 method:org.apache.hadoop.metrics.jvm.JvmMetrics.init(JvmMetrics.java:76)
Initializing JVM Metrics with processName=JobTracker, sessionId=
[WARN ] 2019-05-23 11:44:54,839 method:org.apache.hadoop.mapreduce.JobResourceUploader.uploadFiles(JobResourceUploader.java:64)
Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
[WARN ] 2019-05-23 11:44:54,983 method:org.apache.hadoop.mapreduce.JobResourceUploader.uploadFiles(JobResourceUploader.java:171)
No job jar file set.  User classes may not be found. See Job or Job#setJar(String).
[INFO ] 2019-05-23 11:44:55,317 method:org.apache.hadoop.mapreduce.lib.input.FileInputFormat.listStatus(FileInputFormat.java:283)
Total input paths to process : 1
[INFO ] 2019-05-23 11:44:55,433 method:org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:198)
number of splits:1
[INFO ] 2019-05-23 11:44:55,823 method:org.apache.hadoop.mapreduce.JobSubmitter.printTokens(JobSubmitter.java:287)
Submitting tokens for job: job_local223170_0001
[INFO ] 2019-05-23 11:44:56,360 method:org.apache.hadoop.mapreduce.Job.submit(Job.java:1294)
The url to track the job: http://localhost:8080/
[INFO ] 2019-05-23 11:44:56,362 method:org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1339)
Running job: job_local223170_0001
[INFO ] 2019-05-23 11:44:56,371 method:org.apache.hadoop.mapred.LocalJobRunner$Job.createOutputCommitter(LocalJobRunner.java:471)
OutputCommitter set in config null
[INFO ] 2019-05-23 11:44:56,386 method:org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.<init>(FileOutputCommitter.java:100)
File Output Committer Algorithm version is 1
[INFO ] 2019-05-23 11:44:56,396 method:org.apache.hadoop.mapred.LocalJobRunner$Job.createOutputCommitter(LocalJobRunner.java:489)
OutputCommitter is org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
[INFO ] 2019-05-23 11:44:56,577 method:org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:448)
Waiting for map tasks
[INFO ] 2019-05-23 11:44:56,578 method:org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:224)
Starting task: attempt_local223170_0001_m_000000_0
[INFO ] 2019-05-23 11:44:56,646 method:org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.<init>(FileOutputCommitter.java:100)
File Output Committer Algorithm version is 1
[INFO ] 2019-05-23 11:44:56,659 method:org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.isAvailable(ProcfsBasedProcessTree.java:192)
ProcfsBasedProcessTree currently is supported only on Linux.
[INFO ] 2019-05-23 11:44:56,793 method:org.apache.hadoop.mapred.Task.initialize(Task.java:612)
Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@7586417
[INFO ] 2019-05-23 11:44:56,804 method:org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:756)
Processing split: hdfs://192.168.80.101:9000/111/7.txt:0+98
[INFO ] 2019-05-23 11:44:57,360 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.setEquator(MapTask.java:1205)
(EQUATOR) 0 kvi 26214396(104857584)
[INFO ] 2019-05-23 11:44:57,384 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.init(MapTask.java:998)
mapreduce.task.io.sort.mb: 100
[INFO ] 2019-05-23 11:44:57,392 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.init(MapTask.java:999)
soft limit at 83886080
[INFO ] 2019-05-23 11:44:57,393 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.init(MapTask.java:1000)
bufstart = 0; bufvoid = 104857600
[INFO ] 2019-05-23 11:44:57,395 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.init(MapTask.java:1001)
kvstart = 26214396; length = 6553600
[INFO ] 2019-05-23 11:44:57,439 method:org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1360)
Job job_local223170_0001 running in uber mode : false
[INFO ] 2019-05-23 11:44:57,448 method:org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1367)
map 0% reduce 0%
[INFO ] 2019-05-23 11:44:57,467 method:org.apache.hadoop.mapred.MapTask.createSortingCollector(MapTask.java:403)
Map output collector class = org.apache.hadoop.mapred.MapTask$MapOutputBuffer
[INFO ] 2019-05-23 11:44:58,552 method:org.apache.hadoop.mapred.LocalJobRunner$Job.statusUpdate(LocalJobRunner.java:591)

[INFO ] 2019-05-23 11:44:58,562 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1460)
Starting flush of map output
[INFO ] 2019-05-23 11:44:58,563 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1482)
Spilling map output
[INFO ] 2019-05-23 11:44:58,563 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1483)
bufstart = 0; bufend = 220; bufvoid = 104857600
[INFO ] 2019-05-23 11:44:58,564 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1485)
kvstart = 26214396(104857584); kvend = 26214336(104857344); length = 61/6553600
[INFO ] 2019-05-23 11:44:58,636 method:org.apache.hadoop.mapred.MapTask$MapOutputBuffer.sortAndSpill(MapTask.java:1667)
Finished spill 0
[INFO ] 2019-05-23 11:44:58,648 method:org.apache.hadoop.mapred.Task.done(Task.java:1038)
Task:attempt_local223170_0001_m_000000_0 is done. And is in the process of committing
[INFO ] 2019-05-23 11:44:58,691 method:org.apache.hadoop.mapred.LocalJobRunner$Job.statusUpdate(LocalJobRunner.java:591)
map
[INFO ] 2019-05-23 11:44:58,692 method:org.apache.hadoop.mapred.Task.sendDone(Task.java:1158)
Task 'attempt_local223170_0001_m_000000_0' done.
[INFO ] 2019-05-23 11:44:58,693 method:org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:249)
Finishing task: attempt_local223170_0001_m_000000_0
[INFO ] 2019-05-23 11:44:58,699 method:org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:456)
map task executor complete.
[INFO ] 2019-05-23 11:44:58,709 method:org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:448)
Waiting for reduce tasks
[INFO ] 2019-05-23 11:44:58,709 method:org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:302)
Starting task: attempt_local223170_0001_r_000000_0
[INFO ] 2019-05-23 11:44:58,741 method:org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.<init>(FileOutputCommitter.java:100)
File Output Committer Algorithm version is 1
[INFO ] 2019-05-23 11:44:58,744 method:org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.isAvailable(ProcfsBasedProcessTree.java:192)
ProcfsBasedProcessTree currently is supported only on Linux.
[INFO ] 2019-05-23 11:44:58,893 method:org.apache.hadoop.mapred.Task.initialize(Task.java:612)
Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@650291a
[INFO ] 2019-05-23 11:44:58,902 method:org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:362)
Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@12c3d0cd
[INFO ] 2019-05-23 11:44:58,934 method:org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.<init>(MergeManagerImpl.java:196)
MergerManager: memoryLimit=1138071936, maxSingleShuffleLimit=284517984, mergeThreshold=751127488, ioSortFactor=10, memToMemMergeOutputsThreshold=10
[INFO ] 2019-05-23 11:44:58,940 method:org.apache.hadoop.mapreduce.task.reduce.EventFetcher.run(EventFetcher.java:61)
attempt_local223170_0001_r_000000_0 Thread started: EventFetcher for fetching Map Completion Events
[INFO ] 2019-05-23 11:44:59,026 method:org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:456)
reduce task executor complete.
[WARN ] 2019-05-23 11:44:59,091 method:org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:560)
job_local223170_0001
java.lang.Exception: org.apache.hadoop.mapreduce.task.reduce.Shuffle$ShuffleError: error in shuffle in localfetcher#1
at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:529)
Caused by: org.apache.hadoop.mapreduce.task.reduce.Shuffle$ShuffleError: error in shuffle in localfetcher#1
at org.apache.hadoop.mapreduce.task.reduce.Shuffle.run(Shuffle.java:134)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:376)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.io.FileNotFoundException: E:/tmp/hadoop-少%20年/mapred/local/localRunner/??%20?ê/job***/job_local223170_0001/attempt_local223170_0001_m_000000_0/output/file.out.index
at org.apache.hadoop.fs.RawLocalFileSystem.open(RawLocalFileSystem.java:200)
at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:767)
at org.apache.hadoop.io.SecureIOUtils.openFSDataInputStream(SecureIOUtils.java:156)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:70)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:62)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:57)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.copyMapOutput(LocalFetcher.java:124)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.doCopy(LocalFetcher.java:102)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.run(LocalFetcher.java:85)
[INFO ] 2019-05-23 11:44:59,455 method:org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1367)
map 100% reduce 0%
[INFO ] 2019-05-23 11:44:59,458 method:org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1380)
Job job_local223170_0001 failed with state FAILED due to: NA
[INFO ] 2019-05-23 11:44:59,484 method:org.apache.hadoop.mapreduce.Job.monitorAndPrintJob(Job.java:1385)
Counters: 22
  File System Counters
    FILE: Number of bytes read=152
    FILE: Number of bytes written=275066
    FILE: Number of read operations=0
    FILE: Number of large read operations=0
    FILE: Number of write operations=0
    HDFS: Number of bytes read=98
    HDFS: Number of bytes written=0
    HDFS: Number of read operations=5
    HDFS: Number of large read operations=0
    HDFS: Number of write operations=1
  Map-Reduce Framework
    Map input records=8
    Map output records=16
    Map output bytes=220
    Map output materialized bytes=258
    Input split bytes=101
    Combine input records=0
    Spilled Records=16
    Failed Shuffles=0
    Merged Map outputs=0
    GC time elapsed (ms)=13
    Total committed heap usage (bytes)=258473984
  File Input Format Counters
    Bytes Read=98
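
The fatal line in all of this is the java.io.FileNotFoundException above: the reduce side's local fetcher cannot open the map task's spill index file, and the path it tried (under E:/tmp/hadoop-少%20年/...) embeds a Windows user name containing a space and non-ASCII characters, which comes out garbled. A commonly suggested workaround, offered only as an unverified sketch (E:/hadoop-tmp is an assumed ASCII-only directory), is to redirect hadoop.tmp.dir before building the Job, since the LocalJobRunner's mapred/local scratch directories default to living underneath it:

// Sketch: keep all local scratch space on an ASCII-only path.
// "E:/hadoop-tmp" is an assumption; any path without spaces or
// non-ASCII characters should work.
Configuration conf = new Configuration();
conf.set("hadoop.tmp.dir", "E:/hadoop-tmp");
Job job = Job.getInstance(conf, "wcJob");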
#Java#
All comments
You have to delete the data produced by the previous run before it will run again (a code sketch follows below).
Posted on 2019-05-23 12:22
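
A minimal sketch of that tip in code, assuming it refers to the HDFS output directory (FileOutputFormat also refuses to start a job whose output path already exists). The helper name clearOutput is invented; the cluster URI and path are the ones hard-coded in the driver:

// Hypothetical helper for WCDriver; call it in main() before waitForCompletion().
// Additional imports needed: java.net.URI, org.apache.hadoop.fs.FileSystem.
private static void clearOutput(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.80.101:9000"), conf);
    Path out = new Path("/park/result");
    if (fs.exists(out)) {
        fs.delete(out, true); // recursive: removes the stale result directory
    }
}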
