1. JSON template:
{
    "job": {
        "setting": {
            "speed": {
                "byte": 104857600
            },
            "errorLimit": {
                "record": 0
            }
        },
        "content": [{
            "reader": {
                "name": "hdfsreader",
                "parameter": {
                    "column": [
                        { "index": 0, "type": "string" },
                        { "index": 1, "type": "string" },
                        { "index": 2, "type": "string" },
                        { "index": 3, "type": "string" },
                        { "index": 4, "type": "string" },
                        { "index": 5, "type": "string" },
                        { "index": 6, "type": "date" },
                        { "index": 7, "type": "string" },
                        { "index": 8, "type": "date" },
                        { "index": 9, "type": "string" },
                        { "index": 10, "type": "string" },
                        { "index": 11, "type": "string" },
                        { "index": 12, "type": "string" },
                        { "index": 13, "type": "string" }
                    ],
                    "defaultFS": "hdfs://**.**.**.**:8020",
                    "hdfsUser": "hive",
                    "haveKerberos": true,
                    "kerberosKeytabFilePath": "/opt/software/kerberos/rsk.keytab",
                    "kerberosPrincipal": "rsk@TDH",
                    "hadoopConfig": {
                        "dfs.data.transfer.protection": "integrity"
                    },
                    "kerberosConfFilePath": "/opt/software/kerberos/krb5.conf",
                    "path": "/inceptor1/user/hive/warehouse/dm_hs.db/rsk/ods_jy_jydb_bond_cbvaluationall_on",
                    "fileType": "orc",
                    "fieldDelimiter": "\u0001",
                    "nullFormat": ""
                }
            },
            "writer": {
                "name": "oraclewriter",
                "parameter": {
                    "username": "xrisk_ods",
                    "password": "xpar",
                    "column": ["id", "name"],
                    "preSql": ["delete from test"],
                    "connection": [{
                        "jdbcUrl": "jdbc:oracle:thin:@**.**.**.**:1521:rdmdb",
                        "table": ["test"]
                    }]
                }
            }
        }]
    }
}
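Once the template is saved to a file (the name hdfs2oracle.json below is an assumption; use whatever you like), the job is launched with DataX's standard entry script:

    python ${DATAX_HOME}/bin/datax.py hdfs2oracle.json

Note that DataX requires the reader and writer column lists to line up one-to-one. The writer block above is a placeholder pointing at a two-column test table, so widen its column list to match the 14 reader columns (and adjust or drop the preSql delete) before running against the real target.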
2. Mind the type differences between the source system and DataX: the type of each entry in the reader's column list must be a DataX internal type, not the source (Hive/ORC) type. A sketch of the mapping follows.
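For reference, the type mapping documented for DataX's hdfsreader is roughly as follows (based on the upstream DataX documentation; verify against your DataX build):

    long    - Hive TINYINT, SMALLINT, INT, BIGINT
    double  - Hive FLOAT, DOUBLE
    string  - Hive STRING, CHAR, VARCHAR, BINARY and complex types (STRUCT, MAP, ARRAY, UNION)
    boolean - Hive BOOLEAN
    date    - Hive DATE, TIMESTAMP

So the two date columns in the template (indices 6 and 8) correspond to DATE or TIMESTAMP columns in the source ORC table.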

