Revision 49795
Added by Claudio Atzori over 6 years ago
SubmitMapreduceJobAction.java | ||
---|---|---|
5 | 5 |
import java.util.Map; |
6 | 6 |
import java.util.Map.Entry; |
7 | 7 |
|
8 |
import org.apache.commons.logging.Log; |
|
9 |
import org.apache.commons.logging.LogFactory; |
|
10 |
import org.apache.hadoop.conf.Configuration; |
|
11 |
import org.apache.hadoop.fs.Path; |
|
12 |
import org.apache.hadoop.mapred.JobConf; |
|
13 |
import org.apache.hadoop.mapred.RunningJob; |
|
14 |
|
|
15 | 8 |
import eu.dnetlib.data.hadoop.HadoopJob; |
16 | 9 |
import eu.dnetlib.data.hadoop.config.ClusterName; |
17 | 10 |
import eu.dnetlib.data.hadoop.mapred.MapreduceJobMonitor; |
... | ... | |
21 | 14 |
import eu.dnetlib.data.hadoop.utils.ScanProperties; |
22 | 15 |
import eu.dnetlib.enabling.tools.blackboard.BlackboardJob; |
23 | 16 |
import eu.dnetlib.miscutils.functional.xml.IndentXmlString; |
17 |
import org.apache.commons.logging.Log; |
|
18 |
import org.apache.commons.logging.LogFactory; |
|
19 |
import org.apache.hadoop.conf.Configuration; |
|
20 |
import org.apache.hadoop.fs.Path; |
|
21 |
import org.apache.hadoop.mapred.JobClient; |
|
22 |
import org.apache.hadoop.mapred.JobConf; |
|
23 |
import org.apache.hadoop.mapred.RunningJob; |
|
24 | 24 |
|
25 | 25 |
public class SubmitMapreduceJobAction extends AbstractSubmitAction { |
26 | 26 |
|
... | ... | |
38 | 38 |
try { |
39 | 39 |
final JobConf jobConf = prepareJob(getConf(clusterName), jobName, jobProfile, bbJob.getParameters()); |
40 | 40 |
|
41 |
if (!hadoopClientMap.isMapreduceAvailable(clusterName)) |
|
42 |
throw new HadoopServiceException("mapreduce not available for cluster: " + clusterName.toString()); |
|
43 |
|
|
44 | 41 |
logJobDetails(jobConf); |
45 | 42 |
|
46 |
final RunningJob runningJob = hadoopClientMap.getJtClient(clusterName).submitJob(jobConf); |
|
43 |
final JobClient jtClient = hadoopClientMap.getJtClient(clusterName); |
|
44 |
final RunningJob runningJob = jtClient.submitJob(jobConf); |
|
45 |
|
|
47 | 46 |
final String jobId = newJobId(clusterName, runningJob.getID().getId()); |
48 | 47 |
|
49 |
jobRegistry.registerJob(HadoopJob.newInstance(jobId, clusterName, jobProfile,
|
|
50 |
new MapreduceJobMonitor(runningJob, callback)));
|
|
48 |
jobRegistry.registerJob( |
|
49 |
HadoopJob.newInstance(jobId, clusterName, jobProfile, new MapreduceJobMonitor(jtClient, runningJob, callback)));
|
|
51 | 50 |
|
52 | 51 |
} catch (final IOException e) { |
53 | 52 |
throw new HadoopServiceException("error executing hadoop job: " + jobName, e); |
Also available in: Unified diff
Lazy init of the hadoop clients allows the user for the HBase admin and jtClient to be defined once. The user is available as a service property.