Scraping Job Postings from the National College Student Employment Service Platform

This is a crawler project: it uses Selenium and urllib3 to scrape job postings from the National College Student Employment Service Platform (ncss.cn) and writes them out as a JSON file.

Obtaining the Cookie

At first, on opening the jobs page, I found that the job list is viewable without logging in, so the plan was to grab the request that returns the job list straight from the browser's network panel. Searching the captured traffic for relevant fields located the request.


From it I took the request URL, the Cookie, and the other parameters, built a request header, and issued the request. In the process I noticed that the trailing run of digits in the request URL kept changing.

Looking through the page's JS files showed that this number is time-related, so the bold guess was that it is a timestamp, which I tested with code:

import time

def generate_timestamp():
    """Generate a 13-digit (millisecond) timestamp."""
    return str(int(time.time() * 1000))

print(generate_timestamp())
# output -> 1755764369564
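
As a cross-check in the other direction, the trailing digits of a captured URL can be decoded back into a date (using the value printed above):

import time

url_suffix = 1755764369564  # trailing digits taken from the captured request URL
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(url_suffix / 1000)))
# -> 2025-08-21 ..., i.e. the moment the request was captured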

The output matches the trailing digits of the URL, which confirms the guess. While crawling, however, I found that only the first 10 pages are accessible without logging in, so the login problem had to be solved. I located the relevant login request in the network panel.

I submitted the login form to that URL with the relevant fields and did obtain a post-login Cookie, but requests made from Python with that Cookie were still rejected.
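
The attempt looked roughly like this (a minimal sketch; the form field names are placeholders for the ones observed in the captured request):

import urllib3

http = urllib3.PoolManager(cert_reqs='CERT_NONE')

# Placeholder field names -- the real ones come from the captured login request.
resp = http.request(
    'POST',
    'https://account.chsi.com.cn/passport/login',
    fields={'username': '...', 'password': '...'},
    encode_multipart=False,  # submit as application/x-www-form-urlencoded
)

# Collect every Set-Cookie header the server returned.
print(resp.headers.getlist('Set-Cookie'))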

At first I suspected the captured Cookie itself was wrong, so I wrote some code to diff Cookies captured directly from the browser at different times, hoping to find a pattern:

from difflib import SequenceMatcher

def Cookie_split(Cookie):
    """Split a Cookie string into a {name: value} dict."""
    fragments = Cookie.split("; ")
    dic = {}
    for fragment in fragments:
        key, value = fragment.split('=', 1)
        dic[key] = value
    return dic


def compare_strings(str1, str2):
    matcher = SequenceMatcher(None, str1, str2)
    for op, i1, i2, j1, j2 in matcher.get_opcodes():
        if op == 'replace':
            print(f"at {i1}: replaced '{str1[i1:i2]}' with '{str2[j1:j2]}'")
        elif op == 'delete':
            print(f"at {i1}: deleted '{str1[i1:i2]}'")
        elif op == 'insert':
            # for an insert the new text lives in str2, not str1
            print(f"at {i1}: inserted '{str2[j1:j2]}'")


def Cookie_compare(Cookie1, Cookie2):
    """Compare two Cookie strings and report the differences."""
    cookie_dic1 = Cookie_split(Cookie1)
    cookie_dic2 = Cookie_split(Cookie2)

    key_list1 = cookie_dic1.keys()
    key_list2 = cookie_dic2.keys()
    print(key_list1)
    print(key_list2)

    # Compare the attribute names first
    diff = list(set(key_list1) ^ set(key_list2))
    if not diff:
        print("The two Cookies have identical attribute names")
        for key in key_list1:
            print(f"|-------------------- differences in {key} --------------------|")
            compare_strings(cookie_dic1[key], cookie_dic2[key])
            print("|---------------------------------------------------------------|")
    else:
        print(f"Attribute-name differences between the two Cookies: {diff}")
        same_attribute = list(set(key_list1) & set(key_list2))
        for key in same_attribute:
            print(f"|-------------------- differences in {key} --------------------|")
            compare_strings(cookie_dic1[key], cookie_dic2[key])
            print("|---------------------------------------------------------------|")


if __name__ == "__main__":
    # Cookie6 / Cookie7: two Cookie strings captured from the browser at different times
    Cookie_compare(Cookie6, Cookie7)
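
For reference, SequenceMatcher.get_opcodes() yields (tag, i1, i2, j1, j2) tuples describing how to turn str1[i1:i2] into str2[j1:j2]; compare_strings simply walks those tuples and prints each edit.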

The comparison showed that a Cookie captured from the browser keeps working as long as the page stays logged in, but becomes invalid once the browser is closed. The suspected flow: on login the server issues a session Cookie; the Cookie stays valid while the logged-in browser session is alive; once the browser closes, the server invalidates the session and the Cookie dies with it.

So to keep the Cookie valid, the login session itself must be kept alive. I therefore used Selenium to perform the login and pull the required Cookie fields out of the live browser session. In practice, the fields readable directly from the driver are not the complete set; using the comparison code above, I split the Cookie into parts that stay constant (at least usually) and parts that change on every login. Fortunately, every changing part is among the fields the driver can read directly.

def keep_connect(chrome_location, username, password):
    """Log in via Selenium, collect the Cookies, and keep the browser open
    so the server does not invalidate the session."""
    options = webdriver.ChromeOptions()
    options.binary_location = chrome_location
    options.add_argument("--ignore-certificate-errors")
    # Keep the browser (and thus the session) alive after this function returns
    options.add_experimental_option("detach", True)
    # Selenium 3 style; on Selenium 4 pass service=Service('../chromedriver.exe') instead
    driver = webdriver.Chrome(options=options, executable_path='../chromedriver.exe')

    login_url = 'https://account.chsi.com.cn/passport/login?service=https://www.ncss.cn/student/connect/chsi&entrytype=stu'
    driver.get(login_url)
    time.sleep(1)

    # Fill in the account and password fields, then submit the form
    acc_input = driver.find_element(By.XPATH, r'/html/body/div/div[2]/div[2]/div/div[2]/form/div[1]/input')
    acc_input.send_keys(username)

    pwd_input = driver.find_element(By.XPATH, r'/html/body/div/div[2]/div[2]/div/div[2]/form/div[2]/input')
    pwd_input.send_keys(password)
    driver.find_element(By.XPATH, r'/html/body/div/div[2]/div[2]/div/div[2]/form/div[4]').click()

    # Click through the confirmation link that appears after login
    element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, r"/html/body/div/span/span/a[1]")))
    element.click()

    dic = {}
    for i in driver.get_cookies():
        dic[i['name']] = i['value']

    return dic


def Cookie_concat(got_property):
    """Reassemble a full Cookie string from the fields returned by keep_connect."""
    # Fields that change on every login and must come from the live session
    changed_property_list = ['SESSION', 'Hm_lpvt_378ff17a1ac046691cf78376632d1ed4', '_ga', '_ga_6CXWRD3K0D']

    # Fields that stay constant (at least between runs), captured once from the browser
    no_change = {
        '_ga_1ESVLDHDYL': 'GS1.1.1726503021.3.0.1726503021.0.0.0',
        '_abfpc': 'd7760a7e8d00953eaa18b111273b00d6ad83f1cc_2.0',
        'cna': 'eac1e178d67b00529df2b287b5842f2c',
        '_gid': 'GA1.2.673435621.1755336171',
        'aliyungf_tc': 'bfc4565f794c9e6ac253a5c09f4a937f2afb69e01fc7b413d486aaeaadf06249',
        'XSRF-CCKTOKEN': 'a12b3436b08549ba2f33a4aefa9a1454',
        'CHSICC_CLIENTFLAGNCSS': 'aba5b818eaa8bc94d6fb1ddf17d1df4f',
        'CHSICC01': '!DzrVNB/pHD1H78bzYxYLahOzddj6Y4XQ6NJ5RnOPIOyzHzKixC+5X5WINIjztT+S4x5PGaf/cowaI/Q=',
        'CHSICC_CLIENTFLAGSTUDENT': '5d0ab9cce044f18a699886e7d6705555',
        'Hm_lvt_378ff17a1ac046691cf78376632d1ed4': '1754926580,1754968150,1755336169,1755411478',
        'HMACCOUNT': 'CEB955474E107530',
        'acw_tc': 'ac11000117554230396983911ee7b259a823321b52a20a878c0b42ee677273',
        '_gat_gtag_UA_105074615_1': '1'
    }

    property_all = no_change.copy()
    for i in changed_property_list:
        property_all[i] = got_property[i]

    # Keep the fields in the same order the browser sends them
    property_rank = ['SESSION','_ga_1ESVLDHDYL','_abfpc','cna','_gid','aliyungf_tc','acw_tc','XSRF-CCKTOKEN','CHSICC_CLIENTFLAGNCSS','CHSICC01','CHSICC_CLIENTFLAGSTUDENT','Hm_lvt_378ff17a1ac046691cf78376632d1ed4','HMACCOUNT','_gat_gtag_UA_105074615_1','Hm_lpvt_378ff17a1ac046691cf78376632d1ed4','_ga','_ga_6CXWRD3K0D']

    Cookie = ''
    for i in property_rank:
        Cookie = Cookie + i + '=' + property_all[i] + '; '
    print('Cookie assembled')

    return Cookie


def simulate_login_get_cookie(chrome_location, username, password):
    """Glue the simulated login and the Cookie reassembly together."""
    dic = keep_connect(chrome_location=chrome_location, username=username, password=password)
    return Cookie_concat(dic)
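
Note that it is the options.add_experimental_option("detach", True) line that keeps the Chrome window open after keep_connect returns; as established above, the SESSION Cookie stays valid only while that logged-in browser session is alive.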

Fetching the Data

The data falls into two categories: job-list data and detail-page data.

Job List Data

The job-list data is the per-posting summary shown on the list page (job title, salary, employer, and so on).

This data can be read directly from the response, under "data" -> "list".
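
For reference, the relevant part of the JSON response looks roughly like this (field names taken from the parsing code below; the values are made up for illustration):

{
    "data": {
        "list": [
            {
                "jobId": "...",
                "jobName": "...",
                "lowMonthPay": 8,
                "highMonthPay": 12,
                "headCount": 5,
                "degreeName": "...",
                "recName": "...",
                "recScale": "...",
                "areaCodeName": "...",
                "recTags": "...",
                "major": "...",
                "updateDate": 1755764369564
            }
        ]
    }
}

The fetching code: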

def get_job_data(http, Cookie, page=1, page_size=10):
    """Fetch one page of job-list data."""
    baseurl = "https://www.ncss.cn/student/jobs/jobslist/ajax/"
    # Build the request headers
    headers = {
        'User-Agent': ua.random,
        'Connection': 'keep-alive',
        'Cookie': Cookie,
        "Accept": "application/json,*/*",
        'Referer': "https://account.chsi.com.cn/passport/login?service=https://job.ncss.cn/student/connect/chsi&entrytype=stu"
    }

    # Query parameters
    params = {
        "jobType": "",
        "areaCode": "",
        "jobName": "",
        "monthPay": "",
        "industrySectors": "",
        "property": "",
        "categoryCode": "",
        "memberLevel": "",
        "recruitType": "",
        "offset": page,
        "limit": page_size,
        "keyUnits": "",
        "degreeCode": "",
        "sourcesName": "0",
        "sourcesType": "",
        "_": generate_timestamp()  # dynamic timestamp
    }

    try:
        resp = http.request(
            method='GET',
            url=baseurl,
            fields=params,
            headers=headers
        )
        if resp.status in [403, 401]:
            print(f"Login expired while requesting page {page}")
            return None
        elif resp.status == 200:
            data = json.loads(resp.data.decode('UTF-8'))
            return data['data']['list']
        else:
            print(f"Error requesting page {page}, status code {resp.status}")
            return None

    except Exception as e:
        print(f"Exception while requesting page {page}: {e}")
        return None

Detail Page Data

The detail-page data is mainly the job-description section of each detail page.

This part uses BeautifulSoup to parse the returned HTML, locate the element holding the description, and extract its text:

def fetch_detail_page(job_id, http, Cookie):
    """Fetch a job's detail page."""
    headers = {
        'User-Agent': ua.random,
        'Cookie': Cookie,
        'Referer': 'https://job.ncss.cn/student/jobs/index.html'
    }

    detail_url = f"https://www.ncss.cn/student/jobs/{job_id}/detail.html"
    try:
        resp = http.request(
            'GET',
            detail_url,
            headers=headers
        )
        if resp.status == 200:
            return resp.data.decode('utf-8')
        elif resp.status in [401, 403]:
            print(f"Login expired while fetching detail page {job_id}")
            return None
        else:
            print(f"Request failed, status code {resp.status}")
            return None
    except Exception as e:
        print(f"Request failed: {str(e)}")
        return None


def parse_detail_job_info(html, job_info):
    """Parse a fetched detail page and extract the job-description section."""
    soup = BeautifulSoup(html, 'html.parser')

    job_detail_describe_div = soup.find(name='pre', attrs={'class': "mainContent mainContent"})
    if job_detail_describe_div is not None:
        job_detail_describe = job_detail_describe_div.getText()
    else:
        print(f"Job-description element not found, job ID: {job_info.get('岗位ID')}")
        job_detail_describe = "未知"

    job_info.update({
        "岗位介绍": job_detail_describe
    })

    return job_info
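
Chained together, the flow for a single job looks like this (job_info being the dict that parse_job_info in the full code below builds from the list data):

html = fetch_detail_page(job_id, http, Cookie)
if html:
    job_info = parse_detail_job_info(html, job_info)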

Efficiency and Anti-Scraping

Since the amount of data is fairly large, the crawl is multithreaded on top of urllib3: one main thread acts as manager, and two kinds of worker threads (job-list pages and detail pages) run in a producer-consumer arrangement, while the fake_useragent module rotates the User-Agent value on every request. The code:

def process_manager(total_pages, http, Cookie, num_list_workers=2, num_detail_workers=8, output_address=""):
    """Main coordinator thread."""
    global enqueued_jobs_count
    start_time = time.time()
    print(f"Starting crawl, total pages: {total_pages}")

    # Create the Manager
    with Manager() as manager:
        # Create the queues
        list_page_queue = manager.Queue()
        detail_task_queue = manager.Queue(maxsize=5000)
        result_queue = manager.Queue()

        # Enqueue the list-page tasks
        for page in range(1, total_pages + 1):
            list_page_queue.put(page)

        expected_total_list_pages = total_pages
        expected_total_detail_jobs = total_pages * 10

        # Progress bars
        list_pbar = tqdm(total=expected_total_list_pages, desc='List pages', unit='page', dynamic_ncols=True, file=sys.stdout)
        detail_pbar = tqdm(total=expected_total_detail_jobs, desc='Detail pages', unit='job', dynamic_ncols=True, file=sys.stdout)

        # Create and start the threads
        threads = []

        # List-page workers
        for i in range(num_list_workers):
            t = threading.Thread(
                target=list_page_worker,
                args=(list_page_queue, http, Cookie, detail_task_queue, list_pbar),
                name=f"ListWorker-{i+1}",
                daemon=True
            )
            t.start()
            threads.append(t)

        # Detail-page workers
        for i in range(num_detail_workers):
            t = threading.Thread(
                target=detail_page_worker,
                args=(detail_task_queue, result_queue, http, Cookie, detail_pbar),
                name=f"DetailWorker-{i+1}",
                daemon=True
            )
            t.start()
            threads.append(t)

        # Result-writer thread
        writer_thread = threading.Thread(
            target=result_writer,
            args=(result_queue, output_address),
            name="ResultWriter",
            daemon=True
        )
        writer_thread.start()

        # Wait until every list-page task is done
        list_page_queue.join()

        # Tell the list-page workers to exit
        for _ in range(num_list_workers):
            list_page_queue.put(None)
        print(f"All list-page tasks done; {num_list_workers} workers signaled to exit")

        # Adjust the detail-task total to what was actually enqueued
        with progress_lock:
            actual_enqueued = enqueued_jobs_count
        if actual_enqueued > 0 and actual_enqueued < detail_pbar.total:
            detail_pbar.total = actual_enqueued
            detail_pbar.refresh()

        # Wait until every detail-page task is done
        detail_task_queue.join()

        # Tell the detail-page workers to exit
        for _ in range(num_detail_workers):
            detail_task_queue.put(None)
        print(f"All detail-page tasks done; {num_detail_workers} workers signaled to exit")

        # Tell the result writer to exit
        result_queue.put(None)

        for t in threads:
            t.join(timeout=10)

        writer_thread.join(timeout=10)

        elapsed_time = time.time() - start_time

        list_pbar.close()
        detail_pbar.close()

        print(f"Crawl finished! Total time: {elapsed_time:.2f} s")

Project Flowchart

Full Code

import queue
import random
import threading
import time
from multiprocessing import Manager
from bs4 import BeautifulSoup
import urllib3
import json
from tqdm.auto import tqdm
import sys
from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


# Suppress certificate-verification warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def keep_connect(chrome_location, username, password):
    """Log in via Selenium, collect the Cookies, and keep the session alive
    so the server does not invalidate them."""
    options = webdriver.ChromeOptions()
    options.binary_location = chrome_location
    options.add_argument("--ignore-certificate-errors")
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")
    options.add_argument("--disable-software-rasterizer")
    # Keep the browser (and thus the session) alive after this function returns
    options.add_experimental_option("detach", True)
    # Selenium 3 style; on Selenium 4 pass service=Service('../chromedriver.exe') instead
    driver = webdriver.Chrome(options=options, executable_path='../chromedriver.exe')

    login_url = 'https://account.chsi.com.cn/passport/login?service=https://www.ncss.cn/student/connect/chsi&entrytype=stu'
    driver.get(login_url)
    time.sleep(1)

    # Fill in the account and password fields, then submit the form
    acc_input = driver.find_element(By.XPATH, r'/html/body/div/div[2]/div[2]/div/div[2]/form/div[1]/input')
    acc_input.send_keys(username)

    pwd_input = driver.find_element(By.XPATH, r'/html/body/div/div[2]/div[2]/div/div[2]/form/div[2]/input')
    pwd_input.send_keys(password)
    driver.find_element(By.XPATH, r'/html/body/div/div[2]/div[2]/div/div[2]/form/div[4]').click()

    # Click through the confirmation link that appears after login
    element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, r"/html/body/div/span/span/a[1]")))
    element.click()

    dic = {}
    for i in driver.get_cookies():
        dic[i['name']] = i['value']

    return dic


def Cookie_concat(got_property):
    """Reassemble a full Cookie string from the fields returned by keep_connect."""
    # Fields that change on every login and must come from the live session
    changed_property_list = ['SESSION', 'Hm_lpvt_378ff17a1ac046691cf78376632d1ed4', '_ga', '_ga_6CXWRD3K0D']

    # Fields that stay constant (at least between runs), captured once from the browser
    no_change = {
        '_ga_1ESVLDHDYL': 'GS1.1.1726503021.3.0.1726503021.0.0.0',
        '_abfpc': 'd7760a7e8d00953eaa18b111273b00d6ad83f1cc_2.0',
        'cna': 'eac1e178d67b00529df2b287b5842f2c',
        '_gid': 'GA1.2.673435621.1755336171',
        'aliyungf_tc': 'bfc4565f794c9e6ac253a5c09f4a937f2afb69e01fc7b413d486aaeaadf06249',
        'XSRF-CCKTOKEN': 'a12b3436b08549ba2f33a4aefa9a1454',
        'CHSICC_CLIENTFLAGNCSS': 'aba5b818eaa8bc94d6fb1ddf17d1df4f',
        'CHSICC01': '!DzrVNB/pHD1H78bzYxYLahOzddj6Y4XQ6NJ5RnOPIOyzHzKixC+5X5WINIjztT+S4x5PGaf/cowaI/Q=',
        'CHSICC_CLIENTFLAGSTUDENT': '5d0ab9cce044f18a699886e7d6705555',
        'Hm_lvt_378ff17a1ac046691cf78376632d1ed4': '1754926580,1754968150,1755336169,1755411478',
        'HMACCOUNT': 'CEB955474E107530',
        'acw_tc': 'ac11000117554230396983911ee7b259a823321b52a20a878c0b42ee677273',
        '_gat_gtag_UA_105074615_1': '1'
    }

    property_all = no_change.copy()
    for i in changed_property_list:
        property_all[i] = got_property[i]

    # Keep the fields in the same order the browser sends them
    property_rank = ['SESSION','_ga_1ESVLDHDYL','_abfpc','cna','_gid','aliyungf_tc','acw_tc','XSRF-CCKTOKEN','CHSICC_CLIENTFLAGNCSS','CHSICC01','CHSICC_CLIENTFLAGSTUDENT','Hm_lvt_378ff17a1ac046691cf78376632d1ed4','HMACCOUNT','_gat_gtag_UA_105074615_1','Hm_lpvt_378ff17a1ac046691cf78376632d1ed4','_ga','_ga_6CXWRD3K0D']

    Cookie = ''
    for i in property_rank:
        Cookie = Cookie + i + '=' + property_all[i] + '; '
    print("------------------ Cookie assembled ------------------")

    return Cookie


def simulate_login_get_cookie(chrome_location, username, password):
    """Glue the simulated login and the Cookie reassembly together."""
    dic = keep_connect(chrome_location=chrome_location, username=username, password=password)
    return Cookie_concat(dic)


ua = UserAgent()


def generate_timestamp():
    """Generate a 13-digit (millisecond) timestamp."""
    return str(int(time.time() * 1000))



def get_job_data(http, Cookie, page=1, page_size=10):
    """Fetch one page of job-list data."""
    baseurl = "https://www.ncss.cn/student/jobs/jobslist/ajax/"
    # Build the request headers
    headers = {
        'User-Agent': ua.random,
        'Connection': 'keep-alive',
        'Cookie': Cookie,
        "Accept": "application/json,*/*",
        'Referer': "https://account.chsi.com.cn/passport/login?service=https://job.ncss.cn/student/connect/chsi&entrytype=stu"
    }

    # Query parameters
    params = {
        "jobType": "",
        "areaCode": "",
        "jobName": "",
        "monthPay": "",
        "industrySectors": "",
        "property": "",
        "categoryCode": "",
        "memberLevel": "",
        "recruitType": "",
        "offset": page,
        "limit": page_size,
        "keyUnits": "",
        "degreeCode": "",
        "sourcesName": "0",
        "sourcesType": "",
        "_": generate_timestamp()  # dynamic timestamp
    }

    try:
        resp = http.request(
            method='GET',
            url=baseurl,
            fields=params,
            headers=headers
        )
        if resp.status in [403, 401]:
            tqdm.write(f"Login expired while requesting page {page}")
            return None
        elif resp.status == 200:
            data = json.loads(resp.data.decode('UTF-8'))
            return data['data']['list']
        else:
            tqdm.write(f"Error requesting page {page}, status code {resp.status}")
            return None

    except Exception as e:
        tqdm.write(f"Exception while requesting page {page}: {e}")
        return None




def fetch_detail_page(job_id, http, Cookie):
    """Fetch a job's detail page."""
    headers = {
        'User-Agent': ua.random,
        'Cookie': Cookie,
        'Referer': 'https://job.ncss.cn/student/jobs/index.html'
    }

    detail_url = f"https://www.ncss.cn/student/jobs/{job_id}/detail.html"
    try:
        resp = http.request(
            'GET',
            detail_url,
            headers=headers
        )
        if resp.status == 200:
            return resp.data.decode('utf-8')
        elif resp.status in [401, 403]:
            tqdm.write(f"Login expired while fetching detail page {job_id}")
            return None
        else:
            tqdm.write(f"Request failed, status code {resp.status}")
            return None
    except Exception as e:
        tqdm.write(f"Request failed: {str(e)}")
        return None



def parse_detail_job_info(html, job_info):
    """Parse a fetched detail page and extract the job-description section."""
    soup = BeautifulSoup(html, 'html.parser')

    job_detail_describe_div = soup.find(name='div', attrs={'class': "details"})
    if job_detail_describe_div is not None:
        job_detail_describe = job_detail_describe_div.getText()
    else:
        tqdm.write(f"Job-description element not found, job ID: {job_info.get('岗位ID')}")
        job_detail_describe = "未知"

    job_info.update({
        "岗位介绍": job_detail_describe
    })

    return job_info



def parse_job_info(job):
    """Flatten one job entry from the list response into the output record."""
    id = job.get("jobId")
    timeStamp = job.get("updateDate")
    timeArray = time.localtime(float(timeStamp) / 1000)

    return {
        "岗位ID": id,
        "职位名称": job.get("jobName"),
        "薪资水平": str(job.get("lowMonthPay")) + 'k-' + str(job.get('highMonthPay')) + 'k',
        "招聘人数": job.get("headCount"),
        "学历要求": job.get("degreeName"),
        "招聘方": job.get("recName"),
        "公司规模": job.get("recScale"),
        "地区": job.get("areaCodeName"),
        "福利": job.get("recTags"),
        "专业要求": job.get("major"),
        "岗位更新时间": time.strftime("%Y-%m-%d %H:%M:%S", timeArray),
        "详情网址": f"https://www.ncss.cn/student/jobs/{id}/detail.html"
    }





def list_page_worker(list_page_queue, http, Cookie, detail_task_queue, list_pbar):
    """Worker loop for job-list pages."""
    global list_get_wrong, list_pages_done, enqueued_jobs_count

    while True:
        page = None
        try:
            page = list_page_queue.get(timeout=30)

            if page is None:
                list_page_queue.task_done()   # matches the main thread's put(None)
                break

            # Fetch the list page
            jobs = get_job_data(http=http, Cookie=Cookie, page=page, page_size=10)
            if jobs:
                for job in jobs:
                    job_info = parse_job_info(job)
                    detail_task_queue.put(job_info)
                    with progress_lock:
                        enqueued_jobs_count += 1
            time.sleep(random.uniform(0.5, 1.5))

        except queue.Empty:
            thread_name = threading.current_thread().name
            tqdm.write(f"List-page queue empty; thread {thread_name} waiting for more work")
            continue

        except Exception as e:
            tqdm.write(f"List-page worker exception: {str(e)}")
            if page:
                list_get_wrong.append(page)
            time.sleep(5)

        finally:
            if page is not None:  # only update progress for real tasks
                list_pages_done += 1
                list_pbar.update(1)
                list_page_queue.task_done()




def detail_page_worker(detail_task_queue, result_queue, http, Cookie, detail_pbar):
    """Worker loop for detail pages."""
    global detail_get_wrong, detail_jobs_done

    while True:
        job_info = None
        job_id = None
        try:
            job_info = detail_task_queue.get(timeout=30)

            if job_info is None:
                detail_task_queue.task_done()
                break

            job_id = job_info.get("岗位ID")

            html = fetch_detail_page(job_id, http, Cookie)

            if html:
                new_job_info = parse_detail_job_info(html, job_info)
                result_queue.put(new_job_info)
            else:
                tqdm.write(f"Failed to fetch detail page {job_id}")

            time.sleep(random.uniform(0.3, 0.8))

        except queue.Empty:
            thread_name = threading.current_thread().name
            tqdm.write(f"Detail-page queue empty; thread {thread_name} waiting for more work")
            continue

        except Exception as e:
            tqdm.write(f"Detail-page worker exception: {str(e)}")
            if job_id:
                detail_get_wrong.append(job_id)
            time.sleep(3)
        finally:
            if job_info is not None:
                detail_jobs_done += 1
                detail_pbar.update(1)
                detail_task_queue.task_done()


def result_writer(result_queue, output_address):
    """Writer thread: appends each result as one JSON object per line."""
    struc_time = time.localtime()
    time_year = struc_time.tm_year
    time_month = struc_time.tm_mon
    time_day = struc_time.tm_mday

    output_file = output_address + f'{time_year}_{time_month}_{time_day}_jobs'

    count = 0
    with open(output_file, "a", encoding='utf-8', newline='') as f:
        while True:
            try:
                # Take one result off the queue
                result = result_queue.get(timeout=120)
                if result is None:
                    break

                f.write(json.dumps(result, ensure_ascii=False) + "\n")
                f.flush()

                count += 1
                if count % 100 == 0:
                    tqdm.write(f"{count} results written")

                result_queue.task_done()
            except queue.Empty:
                tqdm.write("Result writer timed out; exiting")
                break
            except Exception as e:
                tqdm.write(f"Result writer exception: {str(e)}")

    tqdm.write(f"Writing finished, {count} records in total")



def process_manager(total_pages, http, Cookie, num_list_workers=2, num_detail_workers=8, output_address=""):
    """Main coordinator thread."""
    global enqueued_jobs_count
    start_time = time.time()
    tqdm.write(f"Starting crawl, total pages: {total_pages}")

    # Create the Manager
    with Manager() as manager:
        # Create the queues
        list_page_queue = manager.Queue()
        detail_task_queue = manager.Queue(maxsize=5000)
        result_queue = manager.Queue()

        # Enqueue the list-page tasks
        for page in range(1, total_pages + 1):
            list_page_queue.put(page)

        expected_total_list_pages = total_pages
        expected_total_detail_jobs = total_pages * 10

        # Progress bars
        list_pbar = tqdm(total=expected_total_list_pages, desc='List pages', unit='page', dynamic_ncols=True, file=sys.stdout)
        detail_pbar = tqdm(total=expected_total_detail_jobs, desc='Detail pages', unit='job', dynamic_ncols=True, file=sys.stdout)

        # Create and start the threads
        threads = []

        # List-page workers
        for i in range(num_list_workers):
            t = threading.Thread(
                target=list_page_worker,
                args=(list_page_queue, http, Cookie, detail_task_queue, list_pbar),
                name=f"ListWorker-{i+1}",
                daemon=True
            )
            t.start()
            threads.append(t)

        # Detail-page workers
        for i in range(num_detail_workers):
            t = threading.Thread(
                target=detail_page_worker,
                args=(detail_task_queue, result_queue, http, Cookie, detail_pbar),
                name=f"DetailWorker-{i+1}",
                daemon=True
            )
            t.start()
            threads.append(t)

        # Result-writer thread
        writer_thread = threading.Thread(
            target=result_writer,
            args=(result_queue, output_address),
            name="ResultWriter",
            daemon=True
        )
        writer_thread.start()

        # Wait until every list-page task is done
        list_page_queue.join()

        # Tell the list-page workers to exit
        for _ in range(num_list_workers):
            list_page_queue.put(None)
        tqdm.write(f"All list-page tasks done; {num_list_workers} workers signaled to exit")

        # Adjust the detail-task total to what was actually enqueued
        with progress_lock:
            actual_enqueued = enqueued_jobs_count
        if actual_enqueued > 0 and actual_enqueued < detail_pbar.total:
            detail_pbar.total = actual_enqueued
            detail_pbar.refresh()

        # Wait until every detail-page task is done
        detail_task_queue.join()

        # Tell the detail-page workers to exit
        for _ in range(num_detail_workers):
            detail_task_queue.put(None)
        tqdm.write(f"All detail-page tasks done; {num_detail_workers} workers signaled to exit")

        # Tell the result writer to exit
        result_queue.put(None)

        for t in threads:
            t.join(timeout=10)

        writer_thread.join(timeout=10)

        elapsed_time = time.time() - start_time

        list_pbar.close()
        detail_pbar.close()

        print(f"Crawl finished! Total time: {elapsed_time:.2f} s")


if __name__ == "__main__":
    # Global state shared by the workers
    progress_lock = threading.Lock()
    list_pages_done = 0
    detail_jobs_done = 0
    enqueued_jobs_count = 0
    # Crawl parameters
    TOTAL_PAGES = 200  # number of list pages to crawl
    NUM_LIST_WORKERS = 2  # list-page worker threads
    NUM_DETAIL_WORKERS = 5  # detail-page worker threads
    OUTPUT_ADDRESS = ""  # output directory; defaults to the project directory
    # Simulated-login parameters
    chrome_location = r'D:\python_project\Google\Chrome\Application\chrome.exe'  # path to the Chrome binary
    username = ""  # account for the simulated login
    password = ""  # password for the simulated login

    detail_get_wrong = []  # job_ids whose detail page failed
    list_get_wrong = []  # page numbers whose list fetch failed


    # Build the connection pool
    print("------------------ Building connection pool ------------------")
    http = urllib3.PoolManager(
        num_pools=50,
        maxsize=50,
        cert_reqs='CERT_NONE',
        assert_hostname=False,
        block=True,
        timeout=urllib3.Timeout(connect=5.0, read=10.0),
        retries=urllib3.Retry(total=3, backoff_factor=0.5),
    )
    print("------------------ Connection pool ready ------------------")

    # Simulated login: keep the session alive and collect the Cookie fields
    print("------------------- Starting simulated login -------------------")
    Cookie = simulate_login_get_cookie(chrome_location=chrome_location, username=username, password=password)
    print("------------------- Simulated login done -------------------")

    # Start the crawl
    print("------------------- Starting crawl -------------------")
    process_manager(
        total_pages=TOTAL_PAGES,
        num_list_workers=NUM_LIST_WORKERS,
        num_detail_workers=NUM_DETAIL_WORKERS,
        output_address=OUTPUT_ADDRESS,
        http=http,
        Cookie=Cookie
    )
    print("------------------- Crawl finished -------------------")

Problems OR Improvements

  1. The crawl still requires a logged-in session; how could the needed data be fetched without logging in?
  2. Proxies could be added so that the IP keeps changing and is harder to block; see the sketch below.
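
For the second point, a minimal sketch of what this could look like with urllib3's ProxyManager (the proxy address is a placeholder, not a working proxy):

import urllib3

# Placeholder address -- substitute a real, ideally rotating, proxy service.
proxy = urllib3.ProxyManager(
    'http://127.0.0.1:8888/',
    num_pools=50,
    maxsize=50,
    cert_reqs='CERT_NONE',
    timeout=urllib3.Timeout(connect=5.0, read=10.0),
    retries=urllib3.Retry(total=3, backoff_factor=0.5),
)

# ProxyManager exposes the same request() interface as PoolManager,
# so it could be passed to process_manager as the `http` argument unchanged.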