Lab 3: Introductory MapReduce Programming Practice

1. Objectives

1. Master basic MapReduce programming methods through hands-on practice.
2. Learn to use MapReduce to solve common data-processing problems, including data deduplication, data sorting, and data mining.

2. Platform

A Hadoop pseudo-distributed environment that has already been configured.

3. Tasks and Requirements

1. Implement file merging and deduplication

Given two input files, file A and file B, write a MapReduce program that merges the two files and removes the duplicated content, producing a new output file C. A sample of the input and output files was provided for reference.

Final experimental result (the merged file):

The code is as follows:

package com.Merge;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Merge {

    // Copy each input line into the map output key; the value is left empty.
    public static class Map extends Mapper<Object, Text, Text, Text> {
        private static Text text = new Text();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            text = value;
            context.write(text, new Text(""));
        }
    }

    // The shuffle groups identical keys, so writing each key once removes duplicates.
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            context.write(key, new Text(""));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String[] otherArgs = new String[] { "input", "output" }; // input and output paths set directly
        if (otherArgs.length != 2) {
            System.err.println("Usage: Merge and duplicate removal <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "Merge and duplicate removal");
        job.setJarByClass(Merge.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
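The deduplication here comes entirely from the shuffle phase: every line of A and B becomes a map output key, identical keys are grouped together, and the reducer writes each key exactly once. To see that grouping logic in isolation, here is a minimal plain-Java sketch (not part of the lab code; the file names fileA.txt, fileB.txt, and fileC.txt are hypothetical):

import java.io.IOException;
import java.nio.file.*;
import java.util.*;

// Local sketch of the merge-and-dedup idea: a TreeSet plays the role of the
// shuffle, grouping identical lines so each one is emitted exactly once.
public class LocalMergeSketch {
    public static void main(String[] args) throws IOException {
        Set<String> merged = new TreeSet<>(); // sorted; duplicates collapse
        for (String file : new String[] { "fileA.txt", "fileB.txt" }) { // hypothetical inputs
            merged.addAll(Files.readAllLines(Paths.get(file)));
        }
        Files.write(Paths.get("fileC.txt"), merged); // merged, duplicate-free output
    }
}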
2. Write a program to sort the input files

There are several input files, and every line of each file contains a single integer. The program must read the integers from all of the files, sort them in ascending order, and write them to a new output file, with two integers per line: the first is the rank of the second integer in the sorted order, and the second is the original integer to be sorted. A sample of the input and output files was provided for reference.

Screenshot of the experimental result:

The code is as follows:

package com.MergeSort;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class MergeSort {

    // Parse each line as an integer and emit it as the key; the framework sorts the keys.
    public static class Map extends Mapper<Object, Text, IntWritable, IntWritable> {
        private static IntWritable data = new IntWritable();

        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            data.set(Integer.parseInt(line));
            context.write(data, new IntWritable(1));
        }
    }

    // Keys arrive in ascending order; emit a running rank for each occurrence.
    public static class Reduce extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
        private static IntWritable linenum = new IntWritable(1);

        public void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            for (IntWritable val : values) {
                context.write(linenum, key);
                linenum = new IntWritable(linenum.get() + 1);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String[] otherArgs = new String[] { "input2", "output2" }; // set the input parameters directly
        if (otherArgs.length != 2) {
            System.err.println("Usage: mergesort <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "mergesort");
        job.setJarByClass(MergeSort.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
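This job relies on MapReduce sorting keys before they reach the reducer; since the job does not change the number of reducers (which defaults to 1 in this setup), the keys arrive in one globally ascending sequence, and the reducer only has to attach a running rank, exactly like the linenum counter above. The same rank-assignment logic in plain Java, on a hypothetical in-memory input:

import java.util.*;

// Local sketch of the sort-and-rank logic: sort the integers, then emit
// "rank value" pairs the way the reducer does with its linenum counter.
public class LocalSortSketch {
    public static void main(String[] args) {
        List<Integer> data = new ArrayList<>(Arrays.asList(33, 37, 12, 40, 16, 25)); // hypothetical input
        Collections.sort(data); // stands in for the MapReduce shuffle sort
        int linenum = 1;
        for (int value : data) {
            System.out.println(linenum + "\t" + value);
            linenum++;
        }
    }
}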
3. Mine information from a given table

A child-parent table is given. The task is to mine the parent-child relationships in it and output a table of grandchild-grandparent relationships.

Screenshot of the final result:

The code is as follows:

package com.join;

import java.io.IOException;
import java.util.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class STjoin {
    public static int time = 0;

    // Emit each child-parent pair twice: once keyed by the parent (relation type 1,
    // "the key's child is ...") and once keyed by the child (relation type 2,
    // "the key's parent is ..."), so the reducer can join the two sides.
    public static class Map extends Mapper<Object, Text, Text, Text> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            String child_name = new String();
            String parent_name = new String();
            String relation_type = new String();
            String line = value.toString();
            int i = 0;
            while (line.charAt(i) != ' ') {
                i++;
            }
            String[] values = { line.substring(0, i), line.substring(i + 1) };
            if (values[0].compareTo("child") != 0) { // skip the table header line
                child_name = values[0];
                parent_name = values[1];
                relation_type = "1";
                context.write(new Text(values[1]),
                        new Text(relation_type + "+" + child_name + "+" + parent_name));
                relation_type = "2";
                context.write(new Text(values[0]),
                        new Text(relation_type + "+" + child_name + "+" + parent_name));
            }
        }
    }

    // For each person, collect their children (type 1 records) and their parents
    // (type 2 records), then output the cross product: grandchild -> grandparent.
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            if (time == 0) { // write the output table header exactly once
                context.write(new Text("grand_child"), new Text("grand_parent"));
                time++;
            }
            int grand_child_num = 0;
            String[] grand_child = new String[10];
            int grand_parent_num = 0;
            String[] grand_parent = new String[10];
            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                int len = record.length();
                int i = 2; // names start after the type digit and the first '+'
                if (len == 0)
                    continue;
                char relation_type = record.charAt(0);
                String child_name = new String();
                String parent_name = new String();
                while (record.charAt(i) != '+') {
                    child_name = child_name + record.charAt(i);
                    i++;
                }
                i = i + 1;
                while (i < len) {
                    parent_name = parent_name + record.charAt(i);
                    i++;
                }
                if (relation_type == '1') {
                    grand_child[grand_child_num] = child_name;
                    grand_child_num++;
                } else {
                    grand_parent[grand_parent_num] = parent_name;
                    grand_parent_num++;
                }
            }
            if (grand_parent_num != 0 && grand_child_num != 0) {
                for (int m = 0; m < grand_child_num; m++) {
                    for (int n = 0; n < grand_parent_num; n++) {
                        context.write(new Text(grand_child[m]),
                                new Text(grand_parent[n]));
                    }
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        String[] otherArgs = new String[] { "input3", "output3" };
        if (otherArgs.length != 2) {
            System.err.println("Usage: Single Table Join <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "Single table join");
        job.setJarByClass(STjoin.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
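The map side emits every child-parent record twice, keyed once by the parent and once by the child; in the reducer, the two sides meet at the shared middle person, and the cross product of that person's children and parents yields the grandchild-grandparent pairs. A plain-Java sketch of the same self-join, using a hypothetical child-parent list:

import java.util.*;

// Local sketch of the single-table self-join: for every person P, pair each
// child of P with each parent of P to obtain grandchild -> grandparent rows.
public class LocalJoinSketch {
    public static void main(String[] args) {
        // hypothetical child-parent pairs (child in column 0, parent in column 1)
        String[][] pairs = { { "Tom", "Lucy" }, { "Lucy", "Mary" }, { "Lucy", "Ben" } };
        Map<String, List<String>> childrenOf = new HashMap<>(); // person -> their children
        Map<String, List<String>> parentsOf = new HashMap<>();  // person -> their parents
        for (String[] p : pairs) {
            childrenOf.computeIfAbsent(p[1], k -> new ArrayList<>()).add(p[0]);
            parentsOf.computeIfAbsent(p[0], k -> new ArrayList<>()).add(p[1]);
        }
        for (String person : childrenOf.keySet()) { // join on the shared middle person
            if (!parentsOf.containsKey(person))
                continue;
            for (String gc : childrenOf.get(person))      // children of person
                for (String gp : parentsOf.get(person))   // parents of person
                    System.out.println(gc + "\t" + gp);   // grandchild -> grandparent
        }
    }
}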
4. Lab Report

Cloud Computing Lab Report
Title: Introductory MapReduce Programming Practice
Name: 包生友
Date: 2016/12/20
Experiment environment: the pre-configured environment on the lab-room virtual machines.
Approach: followed the code provided by the instructor.
Content and completion status: completed; even after discussing with classmates, the purpose of some parts of the code is still unclear to me.
Problems encountered: 1. After execution, a "main function not found" error appeared. 2. Running the job again failed with an error saying that the output file already exists.
Solution (problems encountered and how they were resolved; unresolved problems listed as well): for the second problem, simply delete the output directory before re-running, since the output directory must not exist when the program runs (a programmatic sketch of this cleanup is given after the summary below).

5. Summary

Through this lab I mastered basic MapReduce programming methods and learned to use MapReduce to solve common data-processing problems, including data deduplication, data sorting, and data mining. This short cloud-computing course lab ends here, but I know that there is no end to learning cloud computing.
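As noted in the report above, a re-run fails when the output directory from the previous run still exists. Besides deleting it by hand, the driver can remove it before submitting the job. A minimal sketch using the HDFS FileSystem API, assuming the same Configuration object as in the jobs above (the class and method names here are illustrative, not part of the lab code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: remove a stale output directory so a re-run does not fail with
// an "output directory already exists" error. Assumes conf points at the cluster.
public class CleanOutput {
    public static void deleteIfExists(Configuration conf, String out) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        Path outPath = new Path(out);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true); // true = delete recursively
        }
    }
}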