Free Use of OpenAI (Open API)
Hello everyone! Today the editor at 創(chuàng)意嶺 will introduce the topic of using OpenAI for free. Below is the editor's summary of the common questions; let's take a look.
ChatGPT can be used online for free in China to generate original articles, proposals, copy, work plans, work reports, papers, code, essays, answers to exercises, conversational Q&A and more with one click.
You only need to enter keywords to get the content you want; the more precise the keywords, the more detailed the output. It is available as a WeChat mini program, an online web version, and a PC client.
Contents of this article:
1. Can OpenAI be used as a crawler?
Hello, yes it can. Spinning Up is OpenAI's open-source deep reinforcement learning material for beginners, and it lists 105 classic papers in the deep RL field; see Spinning Up:
The author used a Python crawler to automatically fetch all of the papers, and the downloaded papers are sorted automatically into the same categories as on the web page.
See the download resource: Spinning Up Key Papers
The source code is as follows:
import os
import time
import urllib.request as url_re

import requests as rq
from bs4 import BeautifulSoup as bf

'''Automatically download all the key papers recommended by OpenAI Spinning Up.

See more info on: https://spinningup.openai.com/en/latest/spinningup/keypapers.html

Dependencies:
    bs4, lxml
'''

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'
}

spinningup_url = 'https://spinningup.openai.com/en/latest/spinningup/keypapers.html'
paper_id = 1


def download_pdf(pdf_url, pdf_path):
    """Automatically download a PDF file from the Internet.

    Args:
        pdf_url (str): url of the PDF file to be downloaded
        pdf_path (str): save path of the downloaded PDF file
    """
    if os.path.exists(pdf_path):
        return
    try:
        with url_re.urlopen(pdf_url) as url:
            pdf_data = url.read()
        with open(pdf_path, "wb") as f:
            f.write(pdf_data)
    except Exception:  # fix the broken link of paper [102]
        pdf_url = r"https://is.tuebingen.mpg.de/fileadmin/user_upload/files/publications/Neural-Netw-2008-21-682_4867%5b0%5d.pdf"
        with url_re.urlopen(pdf_url) as url:
            pdf_data = url.read()
        with open(pdf_path, "wb") as f:
            f.write(pdf_data)
    time.sleep(10)  # sleep 10 seconds before downloading the next paper


def download_from_bs4(papers, category_path):
    """Download papers from Spinning Up.

    Args:
        papers (bs4.element.ResultSet): 'a' tags with paper links
        category_path (str): root dir of the papers to be downloaded
    """
    global paper_id
    print("Start to download papers from category {}...".format(category_path))
    for paper in papers:
        paper_link = paper['href']
        if not paper_link.endswith('.pdf'):
            # convert landing-page links of known hosts into direct PDF links
            if paper_link[8:13] == 'arxiv':
                # e.g. "https://arxiv.org/abs/1811.02553" -> "https://arxiv.org/pdf/1811.02553.pdf"
                paper_link = paper_link[:18] + 'pdf' + paper_link[21:] + '.pdf'
            elif paper_link[8:18] == 'openreview':
                # e.g. "https://openreview.net/forum?id=ByG_3s09KX" -> "https://openreview.net/pdf?id=ByG_3s09KX"
                paper_link = paper_link[:23] + 'pdf' + paper_link[28:]
            elif paper_link[14:18] == 'nips':  # NeurIPS link
                paper_link = "https://proceedings.neurips.cc/paper/2017/file/a1d7311f2a312426d710e1c617fcbc8c-Paper.pdf"
            else:
                continue
        paper_name = '[{}] '.format(paper_id) + paper.string + '.pdf'
        # strip characters that are not valid in file names
        if ':' in paper_name:
            paper_name = paper_name.replace(':', '_')
        if '?' in paper_name:
            paper_name = paper_name.replace('?', '')
        paper_path = os.path.join(category_path, paper_name)
        download_pdf(paper_link, paper_path)
        print("Successfully downloaded {}!".format(paper_name))
        paper_id += 1
    print("Successfully downloaded all the papers from category {}!".format(category_path))


def _save_html(html_url, html_path):
    """Save a requested HTML page.

    Args:
        html_url (str): url of the HTML page to be saved
        html_path (str): save path of the HTML file
    """
    html_file = rq.get(html_url, headers=headers)
    with open(html_path, "w", encoding='utf-8') as h:
        h.write(html_file.text)


def download_key_papers(root_dir):
    """Download all the key papers, organized by the categories listed on the website.

    Args:
        root_dir (str): save path of all the downloaded papers
    """
    # 1. Get the HTML of Spinning Up
    spinningup_html = rq.get(spinningup_url, headers=headers)

    # 2. Parse the HTML and get the main category ids
    soup = bf(spinningup_html.content, 'lxml')
    # _save_html(spinningup_url, 'spinningup.html')
    # spinningup_file = open('spinningup.html', 'r', encoding="UTF-8")
    # spinningup_handle = spinningup_file.read()
    # soup = bf(spinningup_handle, features='lxml')
    category_ids = []
    categories = soup.find(name='div', attrs={'class': 'section', 'id': 'key-papers-in-deep-rl'}).\
        find_all(name='div', attrs={'class': 'section'}, recursive=False)
    for category in categories:
        category_ids.append(category['id'])

    # 3. Get all the categories and make the corresponding dirs
    category_dirs = []
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    for category in soup.find_all(name='h4'):
        category_name = list(category.children)[0].string
        if ':' in category_name:  # replace ':' with '_' to get a valid dir name
            category_name = category_name.replace(':', '_')
        category_path = os.path.join(root_dir, category_name)
        category_dirs.append(category_path)
        if not os.path.exists(category_path):
            os.makedirs(category_path)

    # 4. Start to download all the papers
    print("Start to download key papers...")
    for i in range(len(category_ids)):
        category_path = category_dirs[i]
        category_id = category_ids[i]
        content = soup.find(name='div', attrs={'class': 'section', 'id': category_id})
        inner_categories = content.find_all('div')
        if inner_categories:
            for category in inner_categories:
                category_id = category['id']
                inner_category = category.h4.text[:-1]
                inner_category_path = os.path.join(category_path, inner_category)
                if not os.path.exists(inner_category_path):
                    os.makedirs(inner_category_path)
                content = soup.find(name='div', attrs={'class': 'section', 'id': category_id})
                papers = content.find_all(name='a', attrs={'class': 'reference external'})
                download_from_bs4(papers, inner_category_path)
        else:
            papers = content.find_all(name='a', attrs={'class': 'reference external'})
            download_from_bs4(papers, category_path)
    print("Download Complete!")


if __name__ == "__main__":
    root_dir = "key-papers"
    download_key_papers(root_dir)
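To run the script, first install the third-party packages it imports (requests, beautifulsoup4 and lxml, for example via pip). The papers are saved under a key-papers directory, in subdirectories that mirror the category headings on the Spinning Up page.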
2. Can articles written by OpenAI be used directly?
Generally speaking, no. The articles it writes are pieced together from articles found online, so you need to revise them yourself before they can be used.
3. How do I download ChatGPT?
ChatGPT is a cloud-based natural language processing service, so there is nothing to download. You can use it directly in applications or on websites that support ChatGPT. Some of them may require you to create an account or log in, but you do not need to download ChatGPT or any other software or tools.
If you want to integrate ChatGPT into your own application or website, you can visit OpenAI's official website, register a developer account for the OpenAI API, and follow the guide to obtain an API key. Note that the OpenAI API is currently in a testing phase and you need to submit an application to get an API key. For more information on how to use the OpenAI API and integrate ChatGPT, see OpenAI's documentation and developer guides; a rough sketch of what a call can look like follows.
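As an illustration only, here is a minimal sketch in Python, assuming the official openai package (1.x interface) and an API key exported as the environment variable OPENAI_API_KEY; the model name and prompt are placeholders chosen for the example, not something specified by this article.

import os

from openai import OpenAI

# Assumes an API key has already been obtained from OpenAI and exported
# as the environment variable OPENAI_API_KEY.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder; use any chat model your account can access
    messages=[
        {"role": "user", "content": "Explain reinforcement learning in one sentence."},
    ],
)
print(response.choices[0].message.content)

Requests are billed against the account that owns the API key, so keep the key on the server side rather than embedding it in client-side code.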
4. OpenAI cannot be used locally
If OpenAI cannot be used locally, it is usually because the system is incompatible, so the system needs to be updated before it can be used; it can also happen when the relevant file is hidden, in which case it will not work until the file is shown again.
The above are the answers to questions about the free use of OpenAI. We hope this helps; if you have more related questions, you can also contact our customer service, who will explain more knowledge and content for you.