Study notes for the Developer Academy course [Hadoop Distributed Computing Framework MapReduce: MapJoin Case Study on Cached File Handling]; the notes follow the course closely so that users can pick up the material quickly.
课程地址:https://developer.aliyun.com/learning/course/94/detail/1551
MapJoin Case Study: Cached File Handling
Contents:
(1) First, add the cache file in the driver class
(2) Read the cached file data
(1) First, add the cache file in the driver class
Code example:
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class DistributedCacheDriver {

    public static void main(String[] args) throws Exception {

        // 0 Adjust these paths to match your own machine
        args = new String[] { "e:/input/inputtable2", "e:/output1" };

        // 1 Get the job information
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2 Set the jar loading path
        job.setJarByClass(DistributedCacheDriver.class);

        // 3 Associate the mapper
        job.setMapperClass(DistributedCacheMapper.class);

        // 4 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // 5 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 6 Load the cached data
        job.addCacheFile(new URI("file:///e:/input/inputcache/pd.txt"));

        // 7 A map-side join needs no reduce phase, so set the number of reduce tasks to 0
        job.setNumReduceTasks(0);

        // 8 Submit
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
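For reference, here is a hypothetical pair of tab-separated inputs consistent with the code in this case study (the actual course data may differ). The cached small table pd.txt maps a pid to a pname, and each order record under e:/input/inputtable2 carries a pid in its second column:

pd.txt:
01	productA
02	productB

order records:
1001	01	1
1002	02	2

After the map-side join, each output line is the order record with the matching pname appended, e.g. 1001	01	1	productA.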
(2) Read the cached file data
Code example:
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class DistributedCacheMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    // In-memory copy of the small table: pid -> pname
    private Map<String, String> pdMap = new HashMap<>();
    private Text k = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {

        // Cache the small table
        URI[] cacheFiles = context.getCacheFiles();
        String path = cacheFiles[0].getPath();

        BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(path), "UTF-8"));

        String line;
        while (StringUtils.isNotEmpty(line = reader.readLine())) {

            // Split the record
            String[] fields = line.split("\t");

            pdMap.put(fields[0], fields[1]);
        }

        // Close the resource
        IOUtils.closeStream(reader);
    }

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        // Read one line
        String line = value.toString();

        // Split the record
        String[] fields = line.split("\t");

        // Get the pid
        String pid = fields[1];

        // Look up the pname
        String pname = pdMap.get(pid);
        System.out.println(pdMap.get("01")); // debug: print the pname cached for pid "01"

        // Concatenate
        line = line + "\t" + pname;

        k.set(line);

        // Write out
        context.write(k, NullWritable.get());
    }
}
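One detail worth flagging: pdMap.get(pid) returns null when an order's pid has no entry in the cached table, so the joined line would contain the literal text "null". A minimal defensive variant of the lookup (a sketch of ours, not part of the course code) falls back to a placeholder:

// Hypothetical guard: substitute a placeholder when the pid is missing from the cached table
String pname = pdMap.getOrDefault(pid, "NULL");

This keeps the output well-formed even when the two input files are out of sync.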