Ⅰ Merging two files in Python
Hello:
This problem really isn't hard. Here is the code:
txtpath1 = r"a.txt"
txtpath2 = r"b.txt"
txtpath3 = r"c.txt"
fpa = open(txtpath1)
fpb = open(txtpath2)
fpc = open(txtpath3, "w")
# read all of b.txt into memory
arrB = []
for lineb in fpb.readlines():
    arrB.append(lineb)
# for every line of a.txt, write that line followed by the next 10 lines of b.txt
index = 0
for linea in fpa.readlines():
    index = index + 1
    fpc.write(linea)
    for i in range((index - 1) * 10, index * 10):
        try:
            fpc.write(arrB[i])
        except IndexError:  # b.txt has run out of lines
            pass
print("Done!")
fpa.close()
fpb.close()
fpc.close()
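For reference, the same interleaving (one line of a.txt followed by up to ten lines of b.txt) can be sketched more compactly with "with" blocks and itertools.islice; this assumes the same a.txt/b.txt/c.txt names as above:
from itertools import islice

# a sketch of the same merge using context managers instead of manual close()
with open("a.txt") as fpa, open("b.txt") as fpb, open("c.txt", "w") as fpc:
    for linea in fpa:
        fpc.write(linea)               # one line from a.txt ...
        for lineb in islice(fpb, 10):  # ... then up to ten lines from b.txt
            fpc.write(lineb)
print("Done!")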
Ⅱ Merging the contents of two files into a new file with Python
f1 = open("mit.txt", 'a+')
f2 = open("unitcode.txt",'r')
f3 = open("unitname.txt",'r')
s2 = f2.read().replace('\n', '').split(',')
s3 = f3.read().replace('\n', '').split(',')
f1.write('Unit name\tUnit Codes\n')
for i1, i2 in zip(s2, s3):
    # write the name (from unitname.txt) first, then the code, to match the header
    f1.write("%s\t%s\n" % (i2.strip(), i1.strip()))
f1.close()
f2.close()
f3.close()
If you just want the columns to line up neatly, formatted output is enough; padding with str.ljust() will align them.
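A minimal sketch of that ljust alignment (the unit names, codes, and the width of 20 here are made up just for illustration):
# pad the first column to a fixed width instead of separating with a tab
for name, code in [("Intro to Computing", "CS101"), ("Data Structures", "CS202")]:
    print(name.ljust(20) + code)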
Ⅲ Reading the same column from every line of a document in Python and concatenating the values end to end into another file
def _556225095(infile, outfile):
    reader = open(infile, 'r')
    writer = open(outfile, 'w')
    buff = []
    buff_size = 1000  # the file is large, so buffer and write out in chunks
    while True:
        line = reader.readline()
        if len(line) == 0:
            break
        field = line.split()[2]   # the third whitespace-separated column
        buff.append(field.strip())
        if len(buff) >= buff_size:
            writer.write(''.join(buff))
            buff = []
    # flush whatever is left in the buffer once the loop ends
    if buff:
        writer.write(''.join(buff))
    writer.close()
    reader.close()
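A short usage sketch, with hypothetical file names, assuming every line of the input has at least three whitespace-separated columns:
# the third column of every line in data.txt ends up concatenated in out.txt
_556225095('data.txt', 'out.txt')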
Ⅳ Looking for Python 3 crawler code that can copy the text of a novel straight from a novel site and collect it into a new text file
from bs4 import BeautifulSoup
from requests.exceptions import RequestException
import re
import requests
import os


def get_html_text(url):
    # fetch the page; return None if the request fails
    try:
        r = requests.get(url)
        r.raise_for_status()
        return r.text
    except RequestException:
        return None


def get_chapter_names(html):
    # pull "chapter title_file name" pairs out of the .bg blocks
    soup = BeautifulSoup(html, 'lxml')
    charpter = soup.select('.bg')
    charpter_names = []
    for entry in charpter[1:]:
        charpter_name = re.findall('<h2>(.*?)</h2>', str(entry))
        file_name = re.findall('<a href.*?>(.*?)</a>', str(entry))
        if charpter_name and file_name:
            for name in file_name:
                name = name.split(' ')[0]
                charpter_names.append(charpter_name[0] + '_' + name)
        else:
            pass
    return set(charpter_names)


def get_each_url(html):
    # yield every link in the chapter list as a dict
    soup = BeautifulSoup(html, 'lxml')
    urls = soup.select('ul li a')
    for url in urls:
        link = url.get('href')
        text = url.text.split(' ')[0]
        full_name = url.text.replace('?', '')
        yield {'url': link, 'text': text, 'full_name': full_name}
        print(text)


def get_text(url):
    # download one chapter page and return its body text as bytes
    r = requests.get(url)
    r.encoding = r.apparent_encoding
    soup = BeautifulSoup(r.text, 'lxml')
    items = soup.select('div.content-body')
    item = re.findall(';(.*?);', items[0].text, re.S)
    return item[0].encode()


def save_to_file(url, text, full_name):
    # save one chapter under .\mu\<chapter>\<full name>.txt
    base_dir = 'mu'
    path = '{}\\{}\\{}'.format(os.getcwd(), base_dir, text)
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception:
            pass
    try:
        with open(path + '\\' + full_name + '.txt', 'wb') as f:
            f.write(get_text(url))
    except Exception:
        pass


def main():
    url = 'http://seputu.com/'
    html = get_html_text(url)
    chapters = get_chapter_names(html)
    for chapter in chapters:
        for each in get_each_url(html):
            if each['text'] == chapter.split('_')[-1]:
                save_to_file(each['url'], chapter, each['full_name'])


if __name__ == '__main__':
    main()
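The question also asks for everything to end up in one new text file, and the script above saves one .txt per chapter, so a rough follow-up sketch (assuming the mu directory layout that save_to_file creates, and an arbitrary output name) could concatenate them afterwards:
import os

# sketch only: walk the 'mu' directory written by save_to_file() and append
# every chapter .txt into a single combined file; get_text() wrote UTF-8 bytes,
# so the chapters are read back as UTF-8
def combine_chapters(base_dir='mu', out_name='novel.txt'):
    with open(out_name, 'w', encoding='utf-8') as out:
        for root, _, files in os.walk(base_dir):
            for fname in sorted(files):
                if fname.endswith('.txt'):
                    with open(os.path.join(root, fname), encoding='utf-8') as f:
                        out.write(f.read() + '\n')

combine_chapters()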
Ⅴ How to merge the contents of multiple files into a single file in Python
Merging multiple files with Python; the code is as follows:
# Example: merge a.txt, b.txt and c.txt into d.txt
# list of input files to read
flist = ['a.txt', 'b.txt', 'c.txt']
# the output file to write to
ofile = open('d.txt', 'w')
# read every input file in turn and write its lines to the output file
for fr in flist:
    for txt in open(fr, 'r'):
        ofile.write(txt)
ofile.close()
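The same merge can also be written with the standard-library fileinput module, which presents a list of files as one continuous stream of lines; a sketch with the same file names:
import fileinput

# sketch of the same a.txt/b.txt/c.txt -> d.txt merge using fileinput
with open('d.txt', 'w') as ofile, fileinput.input(['a.txt', 'b.txt', 'c.txt']) as fin:
    for line in fin:
        ofile.write(line)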