How to insert content into the middle of a file
fp = open('D://代码开发//Python.path//jhp//fadd.txt', 'r')  # open the target file
s = fp.read()  # read the whole file into memory
fp.close()  # close the file
a = s.split('\n')  # split the text into a list of lines
a.insert(-1, 'a new line')  # insert the new text before the last line
s = '\n'.join(a)  # join the lines back together with '\n'
fp = open('D://代码开发//Python.path//jhp//fadd.txt', 'w')
fp.write(s)
fp.close()
Result:
"properties":{
"zookeeper.connect":"zookeeper.com:2015",
"druid.discovery.curator.path":"/druid/discovery",
"druid.selectors.indexing.serviceName":"druid/overlord",
"commit.periodMillis":"12500",
"consumer.numThreads":"1",
"kafka.zookeeper.connect":"kafkaka.com:2181,kafka.com:2181,kafka.com:2181",
"kafka.group.id":"test_dataSource_hod_dd"
a new line
}
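The snippet above hardcodes the insert position with a.insert(-1, ...), i.e. just before the last line. A slightly more general helper (a minimal sketch; insert_line and its line_no parameter are illustrative names, not from the original code) takes the line number as an argument:

def insert_line(path, line_no, text):
    # read the whole file into memory as a list of lines
    with open(path, 'r') as fp:
        lines = fp.read().split('\n')
    # inserting at index line_no makes text the (line_no + 1)-th line
    lines.insert(line_no, text)
    # join the lines back together and overwrite the original file
    with open(path, 'w') as fp:
        fp.write('\n'.join(lines))

# e.g. make 'a new line' the 9th line of fadd.txt
insert_line('D://代码开发//Python.path//jhp//fadd.txt', 8, 'a new line')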
Inserting content at a specified position in a text file
1. Scenario
In production we need to modify a large number of JSON files, inserting a property into a specified node. The files look like this:
{
  "dataSources":{
    "test_dataSource_hod":{
      "spec":{
        "dataSchema":{
          "dataSource":"test_dataSource_hod",
          "parser":{
            "type":"string",
            "parseSpec":{
              "timestampSpec":{
                "column":"timestamp",
                "format":"yyyy-MM-dd HH:mm:ss"
              },
              "dimensionsSpec":{
                "dimensions":[
                  "method",
                  "key"
                ]
              },
              "format":"json"
            }
          },
          "granularitySpec":{
            "type":"uniform",
            "segmentGranularity":"hour",
            "queryGranularity":"none"
          },
          "metricsSpec":[
            {
              "name":"count",
              "type":"count"
            },
            {
              "name":"call_count",
              "type":"longSum",
              "fieldName":"call_count"
            },
            {
              "name":"succ_count",
              "type":"longSum",
              "fieldName":"succ_count"
            },
            {
              "name":"fail_count",
              "type":"longSum",
              "fieldName":"fail_count"
            }
          ]
        },
        "ioConfig":{
          "type":"realtime"
        },
        "tuningConfig":{
          "type":"realtime",
          "maxRowsInMemory":"100000",
          "intermediatePersistPeriod":"PT10M",
          "windowPeriod":"PT10M"
        }
      },
      "properties":{
        "task.partitions":"1",
        "task.replicants":"1",
        "topicPattern":"test_topic"
      }
    }
  },
  "properties":{
    "zookeeper.connect":"zookeeper.com:2015",
    "druid.discovery.curator.path":"/druid/discovery",
    "druid.selectors.indexing.serviceName":"druid/overlord",
    "commit.periodMillis":"12500",
    "consumer.numThreads":"1",
    "kafka.zookeeper.connect":"kafkaka.com:2181,kafka.com:2181,kafka.com:2181",
    "kafka.group.id":"test_dataSource_hod_dd"
  }
}
We need to add a "druidBeam.randomizeTaskId":"true" property to the final properties node.
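For reference, this is what that node should look like after the change (the new property goes right after the serviceName entry, which is where the script below inserts it):

"properties":{
    "zookeeper.connect":"zookeeper.com:2015",
    "druid.discovery.curator.path":"/druid/discovery",
    "druid.selectors.indexing.serviceName":"druid/overlord",
    "druidBeam.randomizeTaskId":"true",
    "commit.periodMillis":"12500",
    "consumer.numThreads":"1",
    "kafka.zookeeper.connect":"kafkaka.com:2181,kafka.com:2181,kafka.com:2181",
    "kafka.group.id":"test_dataSource_hod_dd"
}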
2. Approach
The rough approach is as follows:
- Scan the folder for all files that need to be changed
- Locate the position in each file where the change is needed
- Insert the new string
The slightly tricky part, in my view, is locating the insert position. What we do know is that "druid.selectors.indexing.serviceName":"druid/overlord" is guaranteed to appear in that node, so as long as we can find that string, we can simply insert right after it.
With the approach settled, let's write the code.
#!/usr/bin/python
# coding:utf-8
import os

old_string = '"druid/overlord"'
# the matched line in the file already ends with a comma, so new_string does not add a trailing one
new_string = ('"druid/overlord",' + '\n    ' + '"druidBeam.randomizeTaskId":"true"')

def insertrandomproperty(file_name):
    if '.json' in file_name:
        with open(file_name, 'r') as oldfile:
            content = oldfile.read()
        checkandinsert(content, file_name)
    else:
        pass

def checkandinsert(content, file_name):
    if 'druidBeam.randomizeTaskId' not in content:
        # to avoid ^M appearing in the new file because of OS differences,
        # we replace '\r' with ''
        new_content = content.replace(old_string, new_string).replace('\r', '')
        with open(file_name, 'w') as newfile:
            newfile.write(new_content)
    else:
        pass

if __name__ == '__main__':
    files = os.listdir('/home/tranquility/conf/service_bak')
    os.chdir('/home/tranquility/conf/service_bak')
    for file_name in files:
        insertrandomproperty(file_name)
The idea is simply to update the content in memory and then write it back to the file. The code is only a rough sketch of the approach; you can refine and optimize it to suit your own needs.
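One possible refinement, since the files are JSON, is to parse them with the json module instead of doing a raw string replacement. This is only a sketch under the assumption that the files are strict, valid JSON (no comments or trailing commas); the helper name add_property is illustrative:

#!/usr/bin/python
# coding:utf-8
import json
import os

conf_dir = '/home/tranquility/conf/service_bak'

def add_property(path):
    # parse the file so the target node is found structurally, not by string search
    with open(path, 'r') as f:
        config = json.load(f)
    props = config.get('properties', {})
    if 'druidBeam.randomizeTaskId' not in props:
        props['druidBeam.randomizeTaskId'] = 'true'
        config['properties'] = props
        # rewrite the file with the updated top-level properties node
        with open(path, 'w') as f:
            json.dump(config, f, indent=2)

for name in os.listdir(conf_dir):
    if name.endswith('.json'):
        add_property(os.path.join(conf_dir, name))

Note, however, that json.dump rewrites the whole file in its own formatting and does not preserve the original key order or layout, which is why the plain string replacement above can be preferable when the files need to stay diff-friendly.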
The above is based on my personal experience; I hope it serves as a useful reference, and I hope you will continue to support IT俱乐部.