Master node = NameNode

 

Main configuration files

 

Read-only default configuration - core-default.xml, hdfs-default.xml, yarn-default.xml and mapred-default.xml
Site-specific configuration - etc/hadoop/core-site.xml, etc/hadoop/hdfs-site.xml, etc/hadoop/yarn-site.xml and etc/hadoop/mapred-site.xml

 

hdfs-related: the fs and io property families; manages files and directories
yarn: YARN (resource management) settings
mapred: MapReduce-related settings
Everything else is managed by core (see the sketch below for how these files are picked up at runtime).
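
A minimal sketch of how these files are layered at runtime, assuming standard Hadoop property keys (the class name ConfPeek and the fallback values are only illustrative): new Configuration() automatically loads core-default.xml and core-site.xml from the classpath, while the hdfs/yarn/mapred resources are added by their respective clients.

import org.apache.hadoop.conf.Configuration;

public class ConfPeek {
	public static void main(String[] args) {
		// core-default.xml and core-site.xml are loaded automatically by Configuration
		Configuration conf = new Configuration();

		// fs.defaultFS normally comes from core-site.xml (e.g. hdfs://<namenode>:9000);
		// "file:///" is only the fallback used here when nothing is configured
		System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS", "file:///"));

		// dfs.replication would normally be set in hdfs-site.xml;
		// the fallback "3" is only an illustrative assumption for this sketch
		System.out.println("dfs.replication = " + conf.get("dfs.replication", "3"));
	}
}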

 

When Hadoop is started, the following daemons come up (each as its own JVM process):
Daemon
NameNode
DataNode
Secondary NameNode
ResourceManager
NodeManager
WebAppProxy
Map Reduce Job History Server

 

============================================================================

Trying out the MapReduce Tutorial

 

1. Creating a Maven project in Eclipse

1-1. Create a Java project and convert it to a Maven project.

A pom.xml is generated along with it.

 

1-2. Add the dependencies to pom.xml

<project xmlns="http://maven.apache.org/POM/4.0.0"
	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<groupId>WordCount2_0712</groupId>
	<artifactId>WordCount2_0712</artifactId>
	<version>0.0.1-SNAPSHOT</version>
	<build>
		<sourceDirectory>src</sourceDirectory>
		<plugins>
			<plugin>
				<artifactId>maven-compiler-plugin</artifactId>
				<version>3.8.1</version>
				<configuration>
					<source>1.8</source>
					<target>1.8</target>
				</configuration>
			</plugin>
		</plugins>
	</build>
	<dependencies>
		<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-common</artifactId>
			<version>2.10.1</version>
		</dependency>
		
		<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs -->
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-hdfs</artifactId>
			<version>2.10.1</version>
			<scope>test</scope>
		</dependency>
		
		<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-core -->
<!-- 		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-core</artifactId>
			<version>1.2.1</version>
		</dependency> -->
		
		<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-core -->
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-mapreduce-client-core</artifactId>
			<version>2.10.1</version>
		</dependency>

	</dependencies>

</project>

 

2. MapReduce Tutorial

2-1. Copy the MapReduce Tutorial source code

package com.test;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;

public class WordCount2 {

	public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

		static enum CountersEnum {
			INPUT_WORDS
		}

		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();

		private boolean caseSensitive;
		private Set<String> patternsToSkip = new HashSet<String>();

		private Configuration conf;
		private BufferedReader fis;

		// setup(): reads the job configuration and, when wordcount.skip.patterns is enabled,
		// loads the skip-pattern files from the distributed cache
		@Override
		public void setup(Context context) throws IOException, InterruptedException {
			conf = context.getConfiguration();
			caseSensitive = conf.getBoolean("wordcount.case.sensitive", true);
			if (conf.getBoolean("wordcount.skip.patterns", false)) {
				URI[] patternsURIs = Job.getInstance(conf).getCacheFiles();
				for (URI patternsURI : patternsURIs) {
					Path patternsPath = new Path(patternsURI.getPath());
					String patternsFileName = patternsPath.getName().toString();
					parseSkipFile(patternsFileName);
				}
			}
		}

		// user-defined helper method: reads the skip patterns from the given file
		private void parseSkipFile(String fileName) {
			try {
				fis = new BufferedReader(new FileReader(fileName));
				String pattern = null;
				while ((pattern = fis.readLine()) != null) {
					patternsToSkip.add(pattern);
				}
			} catch (IOException ioe) {
				System.err.println(
						"Caught exception while parsing the cached file '" + StringUtils.stringifyException(ioe));
			}
		}

		@Override
		public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
			String line = (caseSensitive) ? value.toString() : value.toString().toLowerCase();
			for (String pattern : patternsToSkip) {
				line = line.replaceAll(pattern, "");
			}
			StringTokenizer itr = new StringTokenizer(line);
			while (itr.hasMoreTokens()) {
				word.set(itr.nextToken());
				context.write(word, one);
				Counter counter = context.getCounter(CountersEnum.class.getName(), CountersEnum.INPUT_WORDS.toString());
				counter.increment(1);
			}
		}
	}

	public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		private IntWritable result = new IntWritable();

		// reduce() input example: a -> [1,1,1], b -> [1,1]
		public void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}
			result.set(sum);
			context.write(key, result);
		}
	}

	public static void main(String[] args) throws Exception {
		// 1. Load the Hadoop configuration files
		Configuration conf = new Configuration(); // core-site.xml is read first

		// 2. Parse the arguments received by main(String[] args) with GenericOptionsParser
		GenericOptionsParser optionParser = new GenericOptionsParser(conf, args);

		// 3. Get back the remaining (non-option) arguments as a String[]
		String[] remainingArgs = optionParser.getRemainingArgs();

		if ((remainingArgs.length != 2) && (remainingArgs.length != 4)) {
			System.err.println("Usage: wordcount <in> <out> [-skip skipPatternFile]");
			System.exit(2);
		}

		// 4. Create the Job object
		Job job = Job.getInstance(conf, "word count"); // (configuration, job name - any name works)
		job.setJarByClass(WordCount2.class); // the class inside wc.jar whose main() drives the job

		job.setMapperClass(TokenizerMapper.class); // 4-1. Mapper class: reads the file contents and tokenizes them into words

		job.setCombinerClass(IntSumReducer.class); // 4-2. groups the values per key into lists, e.g. 2 a's, 1 hi, 3 b's => a [1,1], hi [1], b [1,1,1]
													// normally the reducer class should be set here; otherwise an error occurs

		job.setReducerClass(IntSumReducer.class); // 4-3. reduce output: a 2, hi 1, b 3 (this flow is sketched in plain Java after the class)

		// 5. Set the key and value types used to write the result file once the MR job finishes
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		List<String> otherArgs = new ArrayList<String>();
		for (int i = 0; i < remainingArgs.length; ++i) {
			if ("-skip".equals(remainingArgs[i])) {
				job.addCacheFile(new Path(remainingArgs[++i]).toUri());
				job.getConfiguration().setBoolean("wordcount.skip.patterns", true);
			} else {
				otherArgs.add(remainingArgs[i]);
			}
		}
		FileInputFormat.addInputPath(job, new Path(otherArgs.get(0))); // /user/joe/wordcount/input
		FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1))); // /user/joe/wordcount/output

		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
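
Comments 4-1 to 4-3 above describe the map -> group -> reduce flow with the toy example a [1,1], hi [1], b [1,1,1] => a 2, hi 1, b 3. Below is a framework-free sketch of that same flow in plain Java; the class name WordCountFlowDemo and the input string are made up for illustration, and this is not the Hadoop execution path (which runs the Mapper/Combiner/Reducer shown above).

import java.util.*;

public class WordCountFlowDemo {
	public static void main(String[] args) {
		String input = "a hi a b b b";

		// map phase: emit (word, 1) for every token
		List<Map.Entry<String, Integer>> mapped = new ArrayList<>();
		for (String token : input.split("\\s+")) {
			mapped.add(new AbstractMap.SimpleEntry<>(token, 1));
		}

		// group phase: a -> [1, 1], hi -> [1], b -> [1, 1, 1]
		Map<String, List<Integer>> grouped = new TreeMap<>();
		for (Map.Entry<String, Integer> kv : mapped) {
			grouped.computeIfAbsent(kv.getKey(), k -> new ArrayList<>()).add(kv.getValue());
		}

		// reduce phase: sum each list -> a 2, b 3, hi 1
		for (Map.Entry<String, List<Integer>> e : grouped.entrySet()) {
			int sum = e.getValue().stream().mapToInt(Integer::intValue).sum();
			System.out.println(e.getKey() + "\t" + sum);
		}
	}
}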

 

2-2. Export as a runnable JAR file

Right-click the project, choose Export, and select Runnable JAR File to create the jar.
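
Once exported, the jar is typically submitted with the hadoop jar command, e.g. something along the lines of hadoop jar wc.jar com.test.WordCount2 /user/joe/wordcount/input /user/joe/wordcount/output (the input/output paths here are just the ones assumed in the code comments above; the exact invocation depends on how the runnable JAR's main class was configured during export).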
