
Notes on using Selenium from Python

The script below takes a Chinese word as a command-line argument, loads the matching hanyu.baidu.com result page in headless Chrome, and uses BeautifulSoup to extract the pinyin, the basic definitions, and the English translation from the rendered HTML.

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import sys
import urllib.parse
# Require the word to look up as the first command-line argument
if len(sys.argv) <= 1:
    sys.exit()

# URL-encode the query word
queryword = urllib.parse.quote_plus(str(sys.argv[1]))

# Run Chrome in headless mode, i.e. without opening a browser window
chrome_options = Options()
chrome_options.add_argument("--headless")

driver = webdriver.Chrome(options=chrome_options)

url = "https://hanyu.baidu.com/zici/s?wd="+queryword+"&query="+queryword+"&srcid=28232&from=kg0&from=kg0";

driver.get("https://hanyu.baidu.com/zici/s?wd="+queryword+"&query="+queryword+"&srcid=28232&from=kg0&from=kg0")

# Grab the fully rendered HTML from the headless browser
htmldata = driver.page_source

print(htmldata)
driver.quit()

print("您输入查询的词是:"+sys.argv[1])

soup = BeautifulSoup(htmldata, 'html.parser')

# Extract the pinyin of the queried word
pinyin_dt = soup.find('dt', class_='pinyin')
if not pinyin_dt:
    pinyin_div = soup.find('div', id='pinyin')
    if not pinyin_div:
        print("拼音:未查询到:(")
    else:
        pinyin_b = pinyin_div.find('b')
        if not pinyin_b:
            print("拼音:"+pinyin_div.string.strip())
        else:
            print("拼音:"+pinyin_b.string.strip())
else:
    print("拼音:"+pinyin_dt.string.strip())

# Extract the basic definitions of the queried word
basicmean_div = soup.find("div", id="basicmean-wrapper")
if basicmean_div:
    basicmean_ps = basicmean_div.find_all("p")
    count = 1
    if basicmean_ps:
        for basicmean_p in basicmean_ps:
            # Keep only the text of each <p>, dropping the HTML tags
            meaning = basicmean_p.get_text().strip()
            if meaning:
                print("基本释义("+str(count)+"):"+meaning)
                count += 1

# Extract the English translation of the queried word
fanyi_div = soup.find("div", id="fanyi-wrapper")
if fanyi_div:
    fanyi_dt = fanyi_div.find("dt")
    if fanyi_dt:
        print("英文翻译:"+fanyi_dt.string.strip())
