关于Java序列化和Hadoop的序列化

时间:2023-11-22 10:34:44
 import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable; import org.apache.hadoop.io.Writable; public class Test2 {
public static void main(String[] args) throws IOException {
Student stu = new Student(1, "张三");
FileOutputStream fileOutputStream = new FileOutputStream("d:/111");
ObjectOutputStream objectOutputStream = new ObjectOutputStream(fileOutputStream);
objectOutputStream.writeObject(stu);
objectOutputStream.close();
fileOutputStream.close();
//我们一般只关注stu对象的id和name两个属性总共12个字节.但是Java的序列化到硬盘上的文件有175个字节.
//Java序列化了很多没有必要的信息.如果要序列化的数据有很多,那么序列化到磁盘上的数据会更多,非常的浪费.
//Hadoop没有使用Java的序列化机制.如果采用会造成集群的网络传输的时间和流量都集聚的增长.
//Hadoop中自己定义了一个序列化的接口Writable.
//Java序列化中之所以信息多是因为把 类之间的的继承多态信息都包含了. StuWritable stu2 = new StuWritable(1, "张三");
FileOutputStream fileOutputStream2 = new FileOutputStream("d:/222");
DataOutputStream dataOutputStream2 = new DataOutputStream(fileOutputStream2);
stu2.write(dataOutputStream2);
fileOutputStream2.close();
dataOutputStream2.close();
}
} class Student implements Serializable{
private Integer id;
private String name; public Student() {
super();
}
public Student(Integer id, String name) {
super();
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setNameString(String name) {
this.name = name;
} } class StuWritable implements Writable{
private Integer id;
private String name; public StuWritable() {
super();
}
public StuWritable(Integer id, String name) {
super();
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setNameString(String name) {
this.name = name;
} public void write(DataOutput out) throws IOException {
out.writeInt(id);
out.writeUTF(name);
} public void readFields(DataInput in) throws IOException {
this.id = in.readInt();
this.name = in.readUTF();
} }

使用Java序列化接口对应的磁盘上的文件: 共175个字节

关于Java序列化和Hadoop的序列化

使用Hadoop序列化机制对应的磁盘文件: 共12字节

关于Java序列化和Hadoop的序列化

如果类中有继承关系:

 import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable; import org.apache.hadoop.io.Writable; public class Test2 {
public static void main(String[] args) throws IOException {
//我们一般只关注stu对象的id和name两个属性总共12个字节.但是Java的序列化到硬盘上的文件有175个字节.
//Java序列化了很多没有必要的信息.如果要序列化的数据有很多,那么序列化到磁盘上的数据会更多,非常的浪费.
//Hadoop没有使用Java的序列化机制.如果采用会造成集群的网络传输的时间和流量都集聚的增长.
//Hadoop中自己定义了一个序列化的接口Writable.
//Java序列化中之所以信息多是因为把 类之间的的继承多态信息都包含了.再重新构建的时候可以保持原有的关系. StuWritable stu2 = new StuWritable(1, "张三");
stu2.setSex(true);
FileOutputStream fileOutputStream2 = new FileOutputStream("d:/222");
DataOutputStream dataOutputStream2 = new DataOutputStream(fileOutputStream2);
stu2.write(dataOutputStream2);
fileOutputStream2.close();
dataOutputStream2.close();
}
} class StuWritable extends Person implements Writable{
private Integer id;
private String name; public StuWritable() {
super();
}
public StuWritable(Integer id, String name) {
super();
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setNameString(String name) {
this.name = name;
} public void write(DataOutput out) throws IOException {
out.writeInt(id);
out.writeBoolean(super.isSex());
out.writeUTF(name);
} public void readFields(DataInput in) throws IOException {
this.id = in.readInt();
super.setSex(in.readBoolean());
this.name = in.readUTF();
} } class Person{
private boolean sex; public boolean isSex() {
return sex;
} public void setSex(boolean sex) {
this.sex = sex;
} }

这样序列化到磁盘上的文件共13个字节: 因为多了一个boolean属性(占1个字节),相比上面的12个字节多了1个字节.

关于Java序列化和Hadoop的序列化

如果实例化对象中含有类对象.

 import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable; import org.apache.hadoop.io.Writable; public class Test2 {
public static void main(String[] args) throws IOException {
//我们一般只关注stu对象的id和name两个属性总共12个字节.但是Java的序列化到硬盘上的文件有175个字节.
//Java序列化了很多没有必要的信息.如果要序列化的数据有很多,那么序列化到磁盘上的数据会更多,非常的浪费.
//Hadoop没有使用Java的序列化机制.如果采用会造成集群的网络传输的时间和流量都集聚的增长.
//Hadoop中自己定义了一个序列化的接口Writable.
//Java序列化中之所以信息多是因为把 类之间的的继承多态信息都包含了.再重新构建的时候可以保持原有的关系. StuWritable stu2 = new StuWritable(1, "张三");
stu2.setSex(true);
FileOutputStream fileOutputStream2 = new FileOutputStream("d:/222");
DataOutputStream dataOutputStream2 = new DataOutputStream(fileOutputStream2);
stu2.write(dataOutputStream2);
fileOutputStream2.close();
dataOutputStream2.close();
}
} class StuWritable extends Person implements Writable{
private Integer id;
private String name;
private Student student; public StuWritable() {
super();
}
public StuWritable(Integer id, String name) {
super();
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setNameString(String name) {
this.name = name;
} public void write(DataOutput out) throws IOException {
out.writeInt(id);
out.writeBoolean(super.isSex());
out.writeUTF(name);
out.writeInt(student.getId());
out.writeUTF(student.getName());
} public void readFields(DataInput in) throws IOException {
this.id = in.readInt();
super.setSex(in.readBoolean());
this.name = in.readUTF();
this.student = new Student(in.readInt(),in.readUTF());
} } class Person{
private boolean sex; public boolean isSex() {
return sex;
} public void setSex(boolean sex) {
this.sex = sex;
} }

如果我们的Student中有个字段本身就是Writable类型的,应该怎样序列化?