Greetings! In this article we'll show how to collect free public proxies from various countries. Let's get started. The script needs Python to run; we'll install 3.12.5. The parser code has been reworked! Updates will follow! The article comes from an earlier thread on this forum.

Installing Python
Download the installer from the official site.
Link: https://www.python.org/downloads/ (direct link to the file)
Be sure to tick the "Add Python 3.12.5 to PATH" checkbox.
After installing Python, you need to install the libraries used to fetch the proxies and check whether they work.

Installing the Libraries
Press Win+R, type cmd, press Enter.

Code
pip install requests
pip install lxml
pip install beautifulsoup4
pip install cfscrape

Once the libraries are installed, create a file with the .py extension and paste the following code into it. The code handles parsing proxies from various sites. You can use the Notepad++ editor.

Parser Code

import cfscrape
from bs4 import BeautifulSoup
from datetime import date


def main_advanced_name():
    try:
        print("Start || Site - advanced.name")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://advanced.name/ru/freeproxy')
        soup = BeautifulSoup(r.text, 'lxml')
        proxy_list_div = soup.find('div', {'class': 'freeProxy__list'})
        if proxy_list_div:
            proxies = proxy_list_div.find_all('div', {'class': 'freeProxy__list__item'})
            if proxies:
                for proxy in proxies:
                    try:
                        ip_port = proxy.find('span', {'class': 'freeProxy__list__ip'}).text.strip()
                        with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                            f.write(ip_port + "\n")
                    except AttributeError as ae:
                        print(f"Error while processing a proxy: {ae}")
            else:
                print("Could not find the proxy elements.")
        else:
            print("Could not find the main proxy block.")
    except Exception as e:
        print(f"Error: {e}")


def main_best_proxies_ru():
    try:
        print("Start || Site - best-proxies.ru")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://best-proxies.ru/proxylist/free/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_="proxies_table")
        if table:
            line = table.find('tbody').find_all('tr')
            for tr in line:
                try:
                    td = tr.find_all('td')
                    ip_port = f"{td[0].text.strip()}:{td[1].text.strip()}"
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_fineproxy():
    try:
        print("Start || Site - fineproxy.org")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://fineproxy.org/ru/free-proxies/europe/russia/')
        soup = BeautifulSoup(r.text, 'lxml')
        proxies = soup.find_all('div', {'class': 'freeProxy__list__item'})
        if proxies:
            for proxy in proxies:
                try:
                    ip_port = proxy.find('span', {'class': 'freeProxy__list__ip'}).text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Could not find the proxy elements.")
    except Exception as e:
        print(f"Error: {e}")


def main_proxyscrape_ru():
    try:
        print("Start || Site - proxyscrape.com (RU)")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://ru.proxyscrape.com/%D1%81%D0%BF%D0%B8%D1%81%D0%BE%D0%BA-%D0%B1%D0%B5%D1%81%D0%BF%D0%BB%D0%B0%D1%82%D0%BD%D1%8B%D1%85-%D0%BF%D1%80%D0%BE%D0%BA%D1%81%D0%B8-%D1%81%D0%B5%D1%80%D0%B2%D0%B5%D1%80%D0%BE%D0%B2')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', {'class': 'data'})
        if table:
            proxies = table.find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip_port = proxy.find_all('td')[0].text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_proxyline_net():
    try:
        print("Start || Site - proxyline.net")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://proxyline.net/besplatnye-onlajn-proksi-servera/')
        soup = BeautifulSoup(r.text, 'lxml')
        proxies = soup.find_all('li', {'class': 'free-proxy__list__item'})
        if proxies:
            for proxy in proxies:
                try:
                    ip_port = proxy.text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Could not find the proxy elements.")
    except Exception as e:
        print(f"Error: {e}")


def main_free_proxy_cz():
    try:
        print("Start || Site - free-proxy.cz")
        scraper = cfscrape.create_scraper()
        r = scraper.get('http://free-proxy.cz/ru/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', {'id': 'proxy_list'})
        if table:
            proxies = table.find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip_port = f"{proxy.find_all('td')[0].text.strip()}:{proxy.find_all('td')[1].text.strip()}"
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_xseo_in():
    try:
        print("Start || Site - xseo.in")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://xseo.in/freeproxy')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_='proxy_list')
        if table:
            proxies = table.find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip_port = proxy.find('td').text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_free_proxy_sale_ukraine():
    try:
        print("Start || Site - free.proxy-sale.com (Ukraine)")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://free.proxy-sale.com/ru/ukraine/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_='table')
        if table:
            proxies = table.find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip = proxy.find('td', class_='ip').text.strip()
                    port = proxy.find('td', class_='port').text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(f"{ip}:{port}\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_spaceproxy_net():
    try:
        print("Start || Site - spaceproxy.net")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://spaceproxy.net/free-proxy/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_='table')
        if table:
            proxies = table.find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip_port = proxy.find('td').text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(ip_port + "\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_free_proxy_sale():
    try:
        print("Start || Site - free.proxy-sale.com")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://free.proxy-sale.com/ru/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('div', class_='main__table-wrap')
        if table:
            proxies = table.find('table').find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip = proxy.find('td', class_='ip').text.strip()
                    port = proxy.find('td', class_='port').text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(f"{ip}:{port}\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main_hidxxx_name():
    try:
        print("Start || Site - hidxxx.name")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://hidxxx.name/proxy-list/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_='proxy__t')
        if table:
            proxies = table.find('tbody').find_all('tr')
            for proxy in proxies:
                try:
                    ip = proxy.find('td').text.strip()
                    port = proxy.find_all('td')[1].text.strip()
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(f"{ip}:{port}\n")
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main2():
    try:
        print("Start 2 step || Site - free-proxy-list.net")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://free-proxy-list.net')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_='table table-striped table-bordered')
        if table:
            line = table.find('tbody').find_all('tr')
            for tr in line:
                try:
                    td = tr.find_all('td')
                    ip = td[0].text
                    port = td[1].text
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(str(ip + ":" + port + "\n"))
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main3():
    try:
        print("Start 3 step || Site - free.proxy-sale.com")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://free.proxy-sale.com')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('div', class_="main__table-wrap")
        if table:
            line = table.find('table').find('tbody').find_all('tr')
            for i in line:
                try:
                    td = i.find_all('td', class_="ip")
                    for n in td:
                        # The replaced string must match the site's Russian "copy to clipboard" hint text.
                        a = n.find("a").text.replace("Нажмите ⌘-С чтобы скопировать Скопировать в буфер", " ").strip()
                        ip = a
                        with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                            f.write(str(ip + "\n"))
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main4():
    try:
        print("Start 4 step || Site - proxyscrape.com and openproxylist.xyz")
        scraper = cfscrape.create_scraper()
        response5 = scraper.get("https://openproxylist.xyz/http.txt")
        response6 = scraper.get("https://openproxylist.xyz/socks4.txt")
        response7 = scraper.get("https://openproxylist.xyz/socks5.txt")
        proxies5 = response5.text.strip()
        proxies6 = response6.text.strip()
        proxies7 = response7.text.strip()
        with open("proxy_scraped.txt", "a") as txt_file:
            txt_file.write(proxies5 + "\n" + proxies6 + "\n" + proxies7 + "\n")
    except Exception as e:
        print(f"Error: {e}")


def main5():
    try:
        print("Start 5 step || Site - hidemy.name")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://hidemy.name/ru/proxy-list/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('div', class_="table_block")
        if table:
            line = table.find('table').find('tbody').find_all('tr')
            for a in line:
                try:
                    td = a.find_all('td')
                    ip = td[0].text
                    port = td[1].text
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(str(ip + ":" + port + "\n"))
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main7():
    try:
        print("Start 7 step || Site - sslproxies.org")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://www.sslproxies.org/#list')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', class_="table table-striped table-bordered")
        if table:
            line = table.find('tbody').find_all('tr')
            for a in line:
                try:
                    td = a.find_all('td')
                    ip = td[0].text
                    port = td[1].text
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(str(ip + ":" + port + "\n"))
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main8():
    try:
        print("Start 8 step || Site - spys.one")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://spys.one')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('table', cellspacing="0", cellpadding="2")
        if table:
            subtable = table.find('table', cellspacing="1", cellpadding="1", align="left")
            if subtable:
                line = subtable.find_all('tr', class_="spy1x", onmouseover="this.style.background='#002424'")
                line2 = subtable.find_all('tr', class_="spy1xx", onmouseover="this.style.background='#002424'")
                for tr in line:
                    try:
                        td = tr.find_all('td')
                        ip = td[0].text
                        with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                            f.write(str(ip + '\n'))
                    except AttributeError as ae:
                        print(f"Error while processing a proxy: {ae}")
                for tr2 in line2:
                    try:
                        td2 = tr2.find_all('td')
                        ip2 = td2[0].text
                        with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                            f.write(str(ip2 + '\n'))
                    except AttributeError as ae:
                        print(f"Error while processing a proxy: {ae}")
            else:
                print("Proxy subtable not found.")
        else:
            print("Main proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main10():
    try:
        print("Start 10 step || Site - userel.com")
        scraper = cfscrape.create_scraper()
        r = scraper.get('https://userel.com/')
        soup = BeautifulSoup(r.text, 'lxml')
        table = soup.find('div', class_="proxy")
        if table:
            line = table.find('table').find_all('tr')
            for tr in line:
                try:
                    td = tr.find_all('td')
                    ip = td[0].text
                    with open("proxy_scraped.txt", "a+", encoding="utf-8") as f:
                        f.write(str(ip) + '\n')
                except AttributeError as ae:
                    print(f"Error while processing a proxy: {ae}")
        else:
            print("Proxy table not found.")
    except Exception as e:
        print(f"Error: {e}")


def main11():
    try:
        print("Start 11 step || Site - ArchiveProxy")
        scraper = cfscrape.create_scraper()
        year, month, day = str(date.today()).split('-')
        year = int(year)
        month = int(month)
        day = int(day)
        for today in range(day):
            try:
                scoc = scraper.get(f'https://checkerproxy.net/api/archive/{year}-{month}-{today}').json()
                for i in range(40000):
                    try:
                        with open('proxy_scraped.txt', 'a+') as file:
                            file.write(scoc[i]['addr'] + '\n')
                    except (KeyError, IndexError):
                        pass
            except Exception as e:
                print(f"Error while processing the archive: {e}")
                break
    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    # Call the newer functions
    main_advanced_name()
    main_best_proxies_ru()
    main_fineproxy()
    main_proxyscrape_ru()
    main_proxyline_net()
    main_free_proxy_cz()
    main_xseo_in()
    main_free_proxy_sale_ukraine()
    main_spaceproxy_net()
    main_free_proxy_sale()
    main_hidxxx_name()
    # Call the existing functions
    main2()
    main3()
    main4()
    main5()
    main7()
    main8()
    main10()
    main11()
    # Remove duplicates
    print("Duplicates cleaning...")
    with open("proxy_scraped.txt") as input_:
        result = dict.fromkeys(input_).keys()
    with open("proxy_scraped.txt", "w") as output:
        print(*result, file=output, sep="")
    print("Duplicates successfully cleared!")
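
The deduplication step at the end of the script relies on dict.fromkeys() keeping only the first occurrence of each key while preserving insertion order (guaranteed since Python 3.7), so repeated lines are dropped without reshuffling the list. A standalone illustration of the same trick, with made-up sample addresses:

# Minimal sketch of the dedup trick used at the end of the parser (sample addresses are invented).
lines = ["1.2.3.4:80\n", "5.6.7.8:3128\n", "1.2.3.4:80\n"]
unique = dict.fromkeys(lines).keys()
print(list(unique))  # ['1.2.3.4:80\n', '5.6.7.8:3128\n'] - duplicate gone, order kept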
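
Assuming you saved the parser as, say, parser.py (the file name is arbitrary), it is launched from the same cmd window with python parser.py. A quick, optional way to see how many addresses ended up in proxy_scraped.txt afterwards:

# Hypothetical helper: count the collected addresses (run from the script's folder).
with open("proxy_scraped.txt", encoding="utf-8") as f:
    print(sum(1 for _ in f), "proxies collected")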
Once you have pasted the code, run the script. You'll need to wait a while for it to collect proxies from the public sites, after which it removes the duplicates. Now that we have the proxy_scraped.txt file, create a second .py file and paste the following code into it.
This code checks the proxies for functionality, splits them by protocol (socks, http), and sorts them by country.

Checker Code

import threading
import requests
import os

filename = input("FILENAME: ")
timeout = int(input("TIMEOUT: "))

x = 0
y = 0


def check_proxy(proxy):
    global y
    protocols = ['http', 'https', 'socks4', 'socks5']
    for protocol in protocols:
        try:
            # requests expects the target URL scheme ('http'/'https') as the dict key;
            # the proxy protocol goes into the value. The socks4/socks5 checks also
            # need the PySocks extra: pip install "requests[socks]".
            proxies = {'http': f'{protocol}://{proxy}', 'https': f'{protocol}://{proxy}'}
            response = requests.get('http://ip-api.com/json', proxies=proxies, timeout=timeout)
            if response.status_code == 200:
                data = response.json()
                country = data['country']
                folder_path = os.path.join('country', country)
                file_path = os.path.join(folder_path, f'{protocol}.txt')
                file_path2 = os.path.join(folder_path, 'ALL.txt')
                file_path3 = os.path.join("ALL", "ALL.txt")
                file_path4 = os.path.join("ALL", protocol)
                os.makedirs(folder_path, exist_ok=True)
                os.makedirs("ALL", exist_ok=True)
                with open(file_path, 'a') as f:  # per country, per protocol
                    f.write(f'{proxy}\n')
                with open(file_path2, 'a') as f:  # per country, all protocols
                    f.write(f'{proxy}\n')
                with open(f"{file_path4}.txt", 'a') as f:  # all countries, per protocol
                    f.write(f'{proxy}\n')
                with open(file_path3, 'a') as f:  # all countries, all protocols
                    f.write(f'{proxy}\n')
                y += 1
        except:
            pass


with open(filename, 'r') as f:
    proxy_list = [line.strip() for line in f]

for i in proxy_list:
    threading.Thread(target=check_proxy, args=(i,)).start()
    x += 1
    print(f"\rChecked: {x}/{len(proxy_list)} Good: {y}", end='')

Run the script; once the checker has finished its work, the working proxies will be sorted by country. You can also use this checker to test proxies of your own. The proxies are split by protocol, and an "ALL" folder is created next to the script containing every working proxy without any split by country or protocol. You can use these proxies however you like. The choice is yours! Have a good day!
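
For completeness, a minimal sketch of plugging the checker's output back into requests. The path country/Germany/http.txt is only an example of the files the checker creates (any country folder or the ALL folder works the same way), and the target URL is arbitrary:

import random
import requests

# Hypothetical usage: pick a random HTTP proxy that the checker filed under country/Germany/.
with open("country/Germany/http.txt", encoding="utf-8") as f:
    proxy = random.choice([line.strip() for line in f if line.strip()])

proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
print(requests.get("http://ip-api.com/json", proxies=proxies, timeout=10).json())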
Honestly, these proxies are pretty much useless: terrible ping, fully public; even simple checkers barely limp along on proxies like these, and anything more demanding just won't run at all.
Better to buy best-proxies for pennies than use this. It's a hopelessly worn-out public list; no idea what it could even be good for.
P3psi, yeah, after the newbie "giveaways" of a single public proxy from a parser in exchange for likes, it's scary to even open this section))
P3psi, I needed proxies just now too: gathered up some old ones, ran them through a checker, and put them back to work))) Even those are better than what the parser gives)))