package eu.dnetlib.data.mapreduce.hbase.dedup;

import java.io.IOException;
import java.util.*;

import com.google.common.collect.Lists;
import eu.dnetlib.data.mapreduce.JobParams;
import eu.dnetlib.data.mapreduce.util.DedupUtils;
import eu.dnetlib.data.proto.RelTypeProtos.SubRelType;
import eu.dnetlib.data.proto.TypeProtos.Type;
import eu.dnetlib.pace.clustering.NGramUtils;
import eu.dnetlib.pace.config.DedupConfig;
import eu.dnetlib.pace.distance.PaceDocumentDistance;
import eu.dnetlib.pace.distance.eval.ScoreResult;
import eu.dnetlib.pace.model.Field;
import eu.dnetlib.pace.model.MapDocument;
import eu.dnetlib.pace.model.MapDocumentComparator;
import eu.dnetlib.pace.model.MapDocumentSerializer;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;

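/**
 * Reduce phase of the deduplication job: records grouped under the same
 * clustering key are compared pairwise with a sliding-window strategy, and
 * pairs scoring at or above the configured threshold are emitted as
 * similarity relations into the HBase table.
 */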
public class DedupReducer extends TableReducer<Text, ImmutableBytesWritable, ImmutableBytesWritable> {

	private static final Log log = LogFactory.getLog(DedupReducer.class);

	private DedupConfig dedupConf;

	private ImmutableBytesWritable ibw;

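	/**
	 * Loads the {@link DedupConfig} serialized in the job configuration under
	 * {@link JobParams#DEDUP_CONF} and prepares the reusable output key.
	 */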
	@Override
	protected void setup(final Context context) throws IOException, InterruptedException {
		dedupConf = DedupConfig.load(context.getConfiguration().get(JobParams.DEDUP_CONF));
		ibw = new ImmutableBytesWritable();

		log.info("dedup reduce phase \npace conf: " + dedupConf.toString());
	}

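	/**
	 * Dispatches each cluster of candidate duplicates according to the
	 * configured entity type: result records are pre-grouped by the cleaned-up
	 * order field (see {@link #simplifyQueue}), organizations are processed
	 * directly. Singleton clusters are only counted.
	 */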
	@Override
	protected void reduce(final Text key, final Iterable<ImmutableBytesWritable> values, final Context context) throws IOException, InterruptedException {

		final Queue<MapDocument> q = prepare(context, key, values);

		if (q.size() > 1) {
			log.info("reducing key: '" + key + "' records: " + q.size());

			switch (Type.valueOf(dedupConf.getWf().getEntityType())) {
			case result:
				process(simplifyQueue(q, key.toString(), context), context);
				break;
			case organization:
				process(q, context);
				break;
			default:
				throw new IllegalArgumentException("process not implemented for type: " + dedupConf.getWf().getEntityType());
			}
		} else {
			context.getCounter(dedupConf.getWf().getEntityType(), "records per hash key = 1").increment(1);
		}
	}

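	/**
	 * Deserializes the grouped values into a priority queue ordered by the
	 * configured order field, dropping duplicate identifiers and refusing new
	 * documents once the queue grows beyond the configured maximum size (the
	 * remaining values are only counted).
	 */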
	private Queue<MapDocument> prepare(final Context context, final Text key, final Iterable<ImmutableBytesWritable> values) {
		final Queue<MapDocument> queue = new PriorityQueue<MapDocument>(100, new MapDocumentComparator(dedupConf.getWf().getOrderField()));

		final Set<String> seen = new HashSet<String>();
		final int queueMaxSize = dedupConf.getWf().getQueueMaxSize();
		int count = 0;
		boolean logged = false;

		for (final ImmutableBytesWritable i : values) {
			count++;

			if (queue.size() <= queueMaxSize) {
				final MapDocument doc = MapDocumentSerializer.decode(i.copyBytes());
				final String id = doc.getIdentifier();

				if (!seen.contains(id)) {
					seen.add(id);
					queue.add(doc);
				}

			} else {
				if (!logged) {
					// context.getCounter("ngram size > " + LIMIT, "'" + key.toString() + "', --> " + context.getTaskAttemptID()).increment(1);
					context.getCounter("ngram size > " + queueMaxSize, "N").increment(1);
					log.info("breaking out after limit (" + queueMaxSize + ") for ngram '" + key + "'");
					logged = true;
				}
			}
		}

		log.info(String.format("cluster key '%s' size '%s'", key, count));

		return queue;
	}

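	/**
	 * Partitions the (ordered) queue into runs of records sharing the same
	 * cleaned-up value of the order field, keeping only the runs below the
	 * configured group max size; records missing the order field are counted
	 * and discarded.
	 */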
	private Queue<MapDocument> simplifyQueue(final Queue<MapDocument> queue, final String ngram, final Context context) {
		final Queue<MapDocument> q = new LinkedList<MapDocument>();

		String fieldRef = "";
		final List<MapDocument> tempResults = Lists.newArrayList();

		while (!queue.isEmpty()) {
			final MapDocument result = queue.remove();

			final String orderFieldName = dedupConf.getWf().getOrderField();
			final Field orderFieldValue = result.values(orderFieldName);
			if (!orderFieldValue.isEmpty()) {
				final String field = NGramUtils.cleanupForOrdering(orderFieldValue.stringValue());
				if (field.equals(fieldRef)) {
					tempResults.add(result);
				} else {
					populateSimplifiedQueue(q, tempResults, context, fieldRef, ngram);
					tempResults.clear();
					tempResults.add(result);
					fieldRef = field;
				}
			} else {
				context.getCounter(dedupConf.getWf().getEntityType(), "missing " + dedupConf.getWf().getOrderField()).increment(1);
			}
		}
		populateSimplifiedQueue(q, tempResults, context, fieldRef, ngram);

		return q;
	}

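	/**
	 * Flushes the current run of records into the simplified queue, unless the
	 * run has reached the configured group max size, in which case it is
	 * counted and skipped.
	 */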
	private void populateSimplifiedQueue(final Queue<MapDocument> q,
			final List<MapDocument> tempResults,
			final Context context,
			final String fieldRef,
			final String ngram) {
		if (tempResults.size() < dedupConf.getWf().getGroupMaxSize()) {
			q.addAll(tempResults);
		} else {
			context.getCounter(dedupConf.getWf().getEntityType(),
					"Skipped records for count(" + dedupConf.getWf().getOrderField() + ") >= " + dedupConf.getWf().getGroupMaxSize())
					.increment(tempResults.size());
			log.info("Skipped field: " + fieldRef + " - size: " + tempResults.size() + " - ngram: " + ngram);
		}
	}

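	/**
	 * Sliding-window comparison: each document is taken as pivot in turn and
	 * compared with a bounded window (slidingWindowSize) of the documents that
	 * follow it; an identifier in the skip list interrupts the scan of the
	 * current window.
	 */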
	private void process(final Queue<MapDocument> queue, final Context context) throws IOException, InterruptedException {

		final PaceDocumentDistance algo = new PaceDocumentDistance();

		while (!queue.isEmpty()) {

			final MapDocument pivot = queue.remove();
			final String idPivot = pivot.getIdentifier();

			final Field fieldsPivot = pivot.values(dedupConf.getWf().getOrderField());
			final String fieldPivot = (fieldsPivot == null) || fieldsPivot.isEmpty() ? null : fieldsPivot.stringValue();

			if (fieldPivot != null) {
				// System.out.println(idPivot + " --> " + fieldPivot);

				int i = 0;
				for (final MapDocument curr : queue) {
					final String idCurr = curr.getIdentifier();

					if (mustSkip(idCurr)) {
						context.getCounter(dedupConf.getWf().getEntityType(), "skip list").increment(1);
						break;
					}

					if (i > dedupConf.getWf().getSlidingWindowSize()) {
						break;
					}

					final Field fieldsCurr = curr.values(dedupConf.getWf().getOrderField());
					final String fieldCurr = (fieldsCurr == null) || fieldsCurr.isEmpty() ? null : fieldsCurr.stringValue();

					if (!idCurr.equals(idPivot) && (fieldCurr != null)) {

						final ScoreResult sr = similarity(algo, pivot, curr);
						emitOutput(sr, idPivot, idCurr, context);
						i++;
					}
				}
			}
		}
	}

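	/**
	 * Writes the similarity pair when the score is at or above the configured
	 * threshold, and keeps per-outcome counters either way.
	 */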
	private void emitOutput(final ScoreResult sr, final String idPivot, final String idCurr, final Context context) throws IOException, InterruptedException {
		final double d = sr.getScore();

		if (d >= dedupConf.getWf().getThreshold()) {
			writeSimilarity(context, idPivot, idCurr, d);
			context.getCounter(dedupConf.getWf().getEntityType(), SubRelType.dedupSimilarity.toString() + " (x2)").increment(1);
		} else {
			context.getCounter(dedupConf.getWf().getEntityType(), "d < " + dedupConf.getWf().getThreshold()).increment(1);
		}
	}

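	/**
	 * Computes the pairwise distance, logging both documents before rethrowing
	 * any failure.
	 */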
	private ScoreResult similarity(final PaceDocumentDistance algo, final MapDocument a, final MapDocument b) {
		try {
			return algo.between(a, b, dedupConf);
		} catch (Throwable e) {
			log.error(String.format("\nA: %s\n----------------------\nB: %s", a, b), e);
			throw new IllegalArgumentException(e);
		}
	}

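	/** A record is skipped when its namespace prefix appears in the skip list. */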
	private boolean mustSkip(final String idPivot) {
		return dedupConf.getWf().getSkipList().contains(getNsPrefix(idPivot));
	}

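	/** Extracts the namespace prefix, i.e. the substring between "|" and "::" in the record id. */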
	private String getNsPrefix(final String id) {
		return StringUtils.substringBetween(id, "|", "::");
	}

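	/** Emits the similarity relation in both directions (pivot -> id and id -> pivot). */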
	private void writeSimilarity(final Context context, final String idPivot, final String id, final double d) throws IOException, InterruptedException {
		final byte[] rowKey = Bytes.toBytes(idPivot);
		final byte[] target = Bytes.toBytes(id);

		//log.info("writing similarity: " + idPivot + " <-> " + id);

		emitRel(context, rowKey, target, d);
		emitRel(context, target, rowKey, d);
	}

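	/**
	 * Builds the HBase {@link Put} for a single similarity relation: an empty
	 * cell in the similarity column family, keyed by the source id, with the
	 * target id as qualifier.
	 */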
	private void emitRel(final Context context, final byte[] from, final byte[] to, final double d) throws IOException, InterruptedException {
		final Type type = Type.valueOf(dedupConf.getWf().getEntityType());

		//final OafRel.Builder rel = DedupUtils.getDedupSimilarity(dedupConf, new String(from), new String(to));
		//final Oaf.Builder oaf = DedupUtils.buildRel(dedupConf, rel, d);

		//final Put put = new Put(from).add(DedupUtils.getSimilarityCFBytes(type), to, oaf.build().toByteArray());
		final Put put = new Put(from).add(DedupUtils.getSimilarityCFBytes(type), to, Bytes.toBytes(""));
		put.setWriteToWAL(JobParams.WRITE_TO_WAL);
		ibw.set(from);
		context.write(ibw, put);
	}
}