One of our company's sites deleted a large batch of articles, but those pages had already been indexed by Baidu, so visitors coming from search results now land on 404 pages, which is a poor user experience. The fix is to dump the deleted URLs into XML files of 5,000 entries each and submit them to Baidu so the dead links are removed from its index. The raw data is a plain-text list with one URL per line:
https://www.abc.com/html/ys/13003183/20191115/123456.html
https://www.abc.com/html/ys/13003183/20191115/123765.html
https://www.abc.com/html/ys/13003183/20191115/567567.html
https://www.abc.com/html/ys/13003183/20191115/456456.html
https://www.abc.com/html/ys/13003183/20191115/374456.html
https://www.abc.com/html/ys/13003183/20191115/37456645.html
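Since each output file may hold at most 5,000 entries, a quick line count on the raw list tells you how many files to expect. The file name urls.txt used in these examples is hypothetical; substitute your own:

wc -l urls.txt
# e.g. 12345 lines -> ceil(12345 / 5000) = 3 output files

Baidu's dead-link submission expects a sitemap-style XML file, so each chunk has to be wrapped like this: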
<urlset>
<url> <loc> https://www.abc.com/html/ys/13003183/20191115/37404973.html </loc> </url>
<url> <loc> https://www.abc.com/html/jb/13003184/20191115/37404988.html </loc> </url>
<url> <loc> https://www.abc.com/html/jb/13003184/20191115/37404968.html </loc> </url>
<url> <loc> https://www.abc.com/ylaq/13003182/20191115/37404860.html </loc> </url>
<url> <loc> https://www.abc.com/ylaq/13003182/20191115/37404861.html </loc> </url>
</urlset>
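For a single file, the whole transformation can be done in one awk pass. This is just a sketch with hypothetical file names, and it skips the 5,000-line split that the real script below handles:

awk 'BEGIN { print "<urlset>" }
     { print "<url> <loc> " $0 " </loc> </url>" }
     END { print "</urlset>" }' urls.txt > urls.xml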
cat xml.sh
#!/bin/bash
# Wrap every URL line in <url> <loc> ... </loc> </url> tags.
# ':' is used as the sed delimiter so the slashes in the closing
# tags do not need escaping.
sed -i 's:^:<url> <loc> :' "$1"
sed -i 's:$: </loc> </url>:' "$1"
# Derive the output prefix from the input file name (drop the extension).
name=$(echo "$1" | awk -F"." '{print $1}')
echo "$name"
# Split the tagged list into chunks of 5000 URLs each.
split -l 5000 "$1" "${name}_xml"
# Wrap each chunk in <urlset> ... </urlset> and give it an .xml suffix.
for filename in $(find ./ -name "${name}_xml*")
do
    sed -i '1 i\<urlset>' "$filename"
    echo "</urlset>" >> "$filename"
    mv "$filename" "${filename}.xml"
done
Run it against the URL list file:

sh xml.sh filename
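A hypothetical run, again assuming the list is named urls.txt:

sh xml.sh urls.txt
# urls                  <- the prefix echoed by the script
ls urls_xml*
# urls_xmlaa.xml  urls_xmlab.xml  urls_xmlac.xml
head -2 urls_xmlaa.xml
# <urlset>
# <url> <loc> https://www.abc.com/html/ys/13003183/20191115/37404973.html </loc> </url>
xmllint --noout urls_xmlaa.xml   # optional well-formedness check (needs libxml2)

The resulting .xml files can then be submitted to Baidu for dead-link removal, as described at the top.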