목록: Data Science/Crawler (4)
Studio KimHippo :D
필요 패키지 from bs4 import BeautifulSoup as bs from pprint import pprint import requests as req from time import sleep import winsound as ws import os 클래스 부분 class advanced_kim_crawl: def __init__(self, in_url): self.url = in_url def get_parser(self): html = req.get(self.url) self.soup = bs(html.content, 'lxml') return self.soup def get_obj(self, selector, number_of_data): soup = self.get_parser() i..
sub_kim_crawl에 필요한 패키지 # -*- coding: utf-8 -*- from Kim_crawl_mk1 import crawl as kcc from time import sleep import os sub_kim_crawl class sub_crawl: def __init__(self): pass def none(self, in_obj): if (in_obj == 'None') or (in_obj == 'none'): in_obj = None return in_obj def get_parameters(self): print('\n ================================================================= \n') file_name = input(..
필요 패키지 # -*- coding: utf-8 -*- from Kim_crawl_class import crawl as kcc import os, sys, time import winsound as ws 크롤링 함수 class kim_crawl_class_mk2: def __init__(self): pass def none(self, in_obj): if in_obj == 'None': in_obj = None return in_obj def main(self): try: print('\n') print('================================= [Hello, My name is Kim Crawl] =================================') print('===..
필요 패키지 from bs4 import BeautifulSoup as bs from pprint import pprint import requests as req import os, re 클래스 부분 class crawl: def __init__(self, in_url): self.url = in_url # self.driver = webdriver.Chrome('chromedriver') # self.driver.get(in_url) def get_parser(self): html = req.get(self.url) soup = bs(html.text, 'html.parser') return soup def get_obj(self, in_tag, in_val = None, in_key = None, ..