englishtokorea/englishtokorea.py
2025-05-28 14:26:49 +09:00

126 lines
4.4 KiB
Python

import os
import urllib.parse

import pandas as pd
import requests
from bs4 import BeautifulSoup
def fetch_search_results(search_term):
    """Fetch the aha-dic.com dictionary page HTML for *search_term*.

    Args:
        search_term: The English word to look up.

    Returns:
        The response body as text.

    Raises:
        requests.HTTPError: If the server returns a 4xx/5xx status.
        requests.RequestException: On connection failure or timeout.
    """
    base_url = 'http://aha-dic.com/View.asp?word='
    # URL-encode the term so spaces and non-ASCII characters survive
    # the query string intact (the original interpolated it raw).
    url = f"{base_url}{urllib.parse.quote(search_term)}"
    # A timeout keeps the batch loop in main() from hanging forever
    # on an unresponsive server.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return response.text
def parse_results(html, download_folder):
    """Parse a dictionary result page and download its pronunciation audio.

    Args:
        html: Raw HTML of an aha-dic.com View.asp page.
        download_folder: Directory where the MP3 file (if any) is saved.

    Returns:
        A list with zero or one dict (empty when no ``div#result`` exists),
        with keys: Word, PhoneticKor, MP3_URL, Meanings, ExampleSentence,
        PartOfSpeech.
    """
    soup = BeautifulSoup(html, 'html.parser')
    results = []
    # All useful content lives inside the div with id="result".
    result_div = soup.find('div', id='result')
    if result_div:
        # Headword text (class "word").
        word_span = result_div.find('span', class_='word')
        word = word_span.get_text(strip=True) if word_span else ''
        # Korean phonetic transcription (class "phoneticKor").
        phonetic_kor_span = result_div.find('span', class_='phoneticKor')
        phonetic_kor = phonetic_kor_span.get_text(strip=True) if phonetic_kor_span else ''
        # Pronunciation audio: the span carries a custom "mp3" attribute
        # holding a site-relative path. Use .get() so a span without the
        # attribute yields '' instead of raising KeyError.
        play_sound = result_div.find('span', class_='playSound middle')
        mp3_url = play_sound.get('mp3', '') if play_sound else ''
        if mp3_url:
            full_mp3_url = f"http://aha-dic.com{mp3_url}"
            mp3_filename = os.path.join(download_folder, os.path.basename(mp3_url))
            download_file(full_mp3_url, mp3_filename)
        else:
            full_mp3_url = ''
        # Each <li> under the first <ul> is one sense/meaning of the word.
        ul = result_div.find('ul')
        li_elements = ul.find_all('li') if ul else []
        meanings = [li.get_text(strip=True) for li in li_elements]
        # Example sentence and part of speech live in <fieldset class="panel">
        # elements, distinguished by their <legend> label (Korean text).
        panels = result_div.find_all('fieldset', class_='panel')
        example_sentence = ''
        part_of_speech = ''
        for panel in panels:
            legend = panel.find('legend')
            span = panel.find('span')
            if legend and span:
                if '예문' in legend.get_text(strip=True):
                    example_sentence = span.get_text(strip=True)
                elif '품사' in legend.get_text(strip=True):
                    part_of_speech = span.get_text(strip=True)
        results.append({
            'Word': word,
            'PhoneticKor': phonetic_kor,
            'MP3_URL': full_mp3_url,
            'Meanings': '; '.join(meanings),
            'ExampleSentence': example_sentence,
            'PartOfSpeech': part_of_speech
        })
    return results
def download_file(url, local_filename):
    """Stream *url* to *local_filename* on disk, best-effort.

    Any failure (network error, HTTP error status, filesystem error) is
    caught and reported to stdout rather than raised, so a single bad
    download does not abort the batch.
    """
    try:
        response = requests.get(url, stream=True)
        with response:
            response.raise_for_status()
            # Write in 8 KiB chunks to avoid loading the file into memory.
            with open(local_filename, 'wb') as out_file:
                for block in response.iter_content(chunk_size=8192):
                    out_file.write(block)
            print(f"Downloaded: {local_filename}")
    except Exception as err:
        print(f"Failed to download {url}: {err}")
def main():
    """Batch-look-up search terms from a CSV and save results to CSV.

    Reads search terms from ``search_terms.csv`` (must contain a
    'search_term' column), queries aha-dic.com for each, downloads any
    pronunciation MP3 into the download folder, and writes all parsed
    entries to ``search_results.csv``.

    Raises:
        KeyError: If the input CSV lacks a 'search_term' column.
    """
    # Input CSV with the words to look up.
    search_terms_file = 'search_terms.csv'  # CSV file path
    download_folder = 'D:\\downloads'  # destination folder for MP3 files
    # exist_ok avoids the exists()/makedirs() race of the original check.
    os.makedirs(download_folder, exist_ok=True)
    # Load search terms from the CSV file.
    search_terms_df = pd.read_csv(search_terms_file)
    print("CSV 파일의 내용:")
    print(search_terms_df.head())  # sanity-check the CSV contents
    if 'search_term' not in search_terms_df.columns:
        raise KeyError("CSV 파일에 'search_term' 열이 없습니다.")
    # Only the first 5 terms are processed (test limit from the original).
    search_terms = search_terms_df['search_term'].tolist()[:5]
    all_results = []
    # Look up each term; a failure on one term must not stop the rest.
    for i, search_term in enumerate(search_terms, start=1):
        print(f"Processing {i}/{len(search_terms)}: {search_term}")
        try:
            html = fetch_search_results(search_term)
            results = parse_results(html, download_folder)
            all_results.extend(results)
            print(f"Finished processing {search_term}")
        except Exception as e:
            print(f"Error processing {search_term}: {e}")
    # Persist all parsed entries.
    output_filename = 'search_results.csv'
    df = pd.DataFrame(all_results)
    df.to_csv(output_filename, index=False, encoding='utf-8')
    print(f'All results have been saved to {output_filename}')
# Run the batch lookup only when executed as a script, not on import.
if __name__ == '__main__':
    main()