A fix for Hadoop's inability to handle Chinese (GBK) text

Hadoop's default encoding is UTF-8, and that encoding is hard-coded into the output path (the "UTF-8" string inside TextOutputFormat), so when we need to emit Chinese text in another encoding such as GBK we have to supply our own OutputFormat class. The steps are:

1. Create a new class, GBKFileOutputFormat, with the following code:
import java.io.DataOutputStream; 
import java.io.IOException; 
import java.io.UnsupportedEncodingException; 
 
import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.fs.FileSystem; 
import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 
import org.apache.hadoop.io.NullWritable; 
import org.apache.hadoop.io.Text; 
import org.apache.hadoop.io.compress.CompressionCodec; 
import org.apache.hadoop.io.compress.GzipCodec; 
import org.apache.hadoop.mapreduce.OutputFormat; 
import org.apache.hadoop.mapreduce.RecordWriter; 
import org.apache.hadoop.mapreduce.TaskAttemptContext; 
import org.apache.hadoop.util.ReflectionUtils; 
 
/** An {@link OutputFormat} that writes plain text files. */ 
public class GBKFileOutputFormat<K, V> extends FileOutputFormat<K, V> { // modeled on TextOutputFormat, the default output format 
  protected static class LineRecordWriter<K, V> // the default record writer 
    extends RecordWriter<K, V> { 
    private static final String utf8 = "GBK";  // hard-coded charset name; "UTF-8" in the original, changed here to "GBK" 
    private static final byte[] newline; // line-terminator bytes 
    static { 
      try { 
        newline = "\n".getBytes(utf8); 
      } catch (UnsupportedEncodingException uee) { 
        throw new IllegalArgumentException("can't find " + utf8 + " encoding"); 
      } 
    } 
 
    protected DataOutputStream out; 
    private final byte[] keyValueSeparator; // separator between key and value; defaults to a tab 
 
    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) { // initialize the output stream and the separator 
      this.out = out; 
      try { 
        this.keyValueSeparator = keyValueSeparator.getBytes(utf8); 
      } catch (UnsupportedEncodingException uee) { 
        throw new IllegalArgumentException("can't find " + utf8 + " encoding"); 
      } 
    } 
 
    public LineRecordWriter(DataOutputStream out) { // use the default separator 
      this(out, "\t"); 
    } 
 
    /**
     * Write the object to the byte stream, handling Text as a special case.
     * @param o the object to print
     * @throws IOException if the write throws, we pass it on
     */ 
    private void writeObject(Object o) throws IOException { // each record is written as: key keyValueSeparator value newline 
      if (o instanceof Text) { // o is a Text instance 
        // Text keeps its contents as UTF-8 bytes internally, so re-encode via
        // toString() instead of writing the raw bytes; otherwise Text keys and
        // values would still come out as UTF-8 rather than GBK.
        out.write(o.toString().getBytes(utf8)); 
      } else { 
        out.write(o.toString().getBytes(utf8)); 
      } 
    } 
 
    public synchronized void write(K key, V value) // synchronized: concurrent writes are mutually exclusive 
      throws IOException { 
      // check whether the key and the value are null (or NullWritable) 
      boolean nullKey = key == null || key instanceof NullWritable; 
      boolean nullValue = value == null || value instanceof NullWritable; 
      if (nullKey && nullValue) { 
        return; 
      } 
      if (!nullKey) { 
        writeObject(key); 
      } 
      if (!(nullKey || nullValue)) { 
        out.write(keyValueSeparator); 
      } 
      if (!nullValue) { 
        writeObject(value); 
      } 
      out.write(newline); 
    } 
 
    public synchronized 
    void close(TaskAttemptContext context) throws IOException { 
      out.close(); 
    } 
  } 
 
  public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) // obtain a writer instance 
      throws IOException, InterruptedException { 
    Configuration conf = job.getConfiguration(); 
    boolean isCompressed = getCompressOutput(job); 
    String keyValueSeparator = conf.get("mapred.textoutputformat.separator", 
                                        "\t"); 
    CompressionCodec codec = null; // compression codec, used only when output compression is enabled 
    String extension = ""; 
    if (isCompressed) { 
      Class<? extends CompressionCodec> codecClass = 
        getOutputCompressorClass(job, GzipCodec.class); 
      codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf); 
      extension = codec.getDefaultExtension(); 
    } 
    Path file = getDefaultWorkFile(job, extension); // default output file path and name; implemented in FileOutputFormat 
    FileSystem fs = file.getFileSystem(conf); 
    if (!isCompressed) { 
      FSDataOutputStream fileOut = fs.create(file, false); 
      return new LineRecordWriter<K, V>(fileOut, keyValueSeparator); 
    } else { 
      FSDataOutputStream fileOut = fs.create(file, false); 
      return new LineRecordWriter<K, V>(new DataOutputStream 
                                        (codec.createOutputStream(fileOut)), 
                                        keyValueSeparator); 
    } 
  } 
}

This class is a modified copy of the TextOutputFormat class from the Hadoop source. Note that the parent class it extends, FileOutputFormat, is the one in the org.apache.hadoop.mapreduce.lib.output package (the new mapreduce API), not the one in org.apache.hadoop.mapred.

2. In the driver (main) class, add job.setOutputFormatClass(GBKFileOutputFormat.class);
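
For reference, a minimal driver sketch might look like the following. The class name GbkOutputDriver and the bare identity map/reduce setup are only placeholders; in a real job you would configure your own mapper, reducer and key/value types, keeping the setOutputFormatClass line.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class GbkOutputDriver { // placeholder driver class name
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = new Job(conf, "gbk-output-example"); // use Job.getInstance(conf, ...) on Hadoop 2.x+
    job.setJarByClass(GbkOutputDriver.class);
    // No mapper/reducer is set, so the identity Mapper/Reducer pass each
    // (byte offset, line) pair from TextInputFormat straight through.
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormatClass(GBKFileOutputFormat.class); // step 2: write output in GBK instead of UTF-8
    FileInputFormat.addInputPath(job, new Path(args[0]));
    GBKFileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}

With this driver the part files under the output directory contain GBK-encoded bytes, so view them with a GBK-aware tool rather than assuming UTF-8.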
