You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

77 lines
2.7 KiB

This file contains ambiguous Unicode characters!

This file contains ambiguous Unicode characters that may be confused with others in your current locale. If your use case is intentional and legitimate, you can safely ignore this warning. Use the Escape button to highlight these characters.

from flask import Blueprint, abort, render_template, request, session
from module.article import Article
from module.comment import Comment
from module.credit import Credit
from module.thumb import Thumb
from module.user import Users
from bs4 import BeautifulSoup
from module.favorite import Favorite
# Blueprint grouping all article-related routes; registered by the app factory.
article = Blueprint("article",__name__)
@article.route('/article/<int:articleid>')
def read(articleid):
    """Render the article page, showing only the first half of its paragraphs.

    The remaining paragraphs are fetched later via the /readall endpoint
    once the reader pays credits.

    :param articleid: primary key of the article to display.
    :raises 404: when no article with this id exists.
    :raises 500: on any unexpected error while building the page.
    """
    result = Article().find_by_id(articleid)
    # Keep the 404 outside the try block: a bare/broad except around
    # abort(404) would catch the HTTPException and turn it into a 500.
    if result is None:
        abort(404)
    try:
        # Parse the stored HTML content and keep only the first half
        # of the <p> tags as the free preview.
        soup = BeautifulSoup(result.content, 'html.parser')
        paragraphs = soup.find_all('p')
        half_point = len(paragraphs) // 2  # integer division to find the midpoint
        content_half = ''.join(str(p) for p in paragraphs[:half_point])
        # Previous/next navigation; these may be None at the boundaries.
        result_last = Article().find_max_less_than_id(articleid)
        result_next = Article().find_min_greater_than_id(articleid)
        # Each page view bumps the read counter by one.
        Article().update_readcount(articleid)
        # Has the current user already paid for (unlocked) this article?
        payed = Credit().check_payed_article(articleid)
        # Has the current user favorited this article?
        is_favorited = Favorite().is_at_favorite(articleid)
        # All comments for this article, joined with their authors.
        comment = Comment().find_comment_with_user(articleid=articleid)
        return render_template('article-user.html',
                               result=result,
                               result_content=content_half,  # free preview (first half)
                               result_last=result_last,
                               result_next=result_next,
                               position=half_point,
                               payed=payed,
                               is_favorited=is_favorited,
                               comment=comment)
    except Exception:
        # Narrowed from a bare `except:` so system exits and the 404 above
        # are no longer swallowed and misreported as server errors.
        abort(500)
@article.route('/readall', methods=['POST'])
def read_all():
    """Return the locked second half of an article and charge the reader.

    Expects form fields ``position`` (index of the first hidden <p>) and
    ``articleid``. Inserts a credit-spend detail row and deducts the
    article's credit cost from the user's balance.

    :raises 400: when the form fields are missing or not integers.
    :raises 404: when the article does not exist.
    """
    try:
        position = int(request.form.get('position'))
        articleid = int(request.form.get('articleid'))
    except (TypeError, ValueError):
        # Missing or non-numeric form data is a client error, not a 500.
        abort(400)
    # NOTE: renamed from `article` — the original local shadowed the
    # module-level Blueprint of the same name.
    article_model = Article()
    result = article_model.find_by_id(articleid)
    if result is None:
        # Previously this fell through to an AttributeError (500).
        abort(404)
    # Parse the stored HTML and return everything from `position` onward.
    soup = BeautifulSoup(result.content, 'html.parser')
    content_half = ''.join(str(p) for p in soup.find_all('p')[position:])
    # Record the credit spend detail (category 5 = article purchase —
    # presumably; confirm against the Credit module's category table).
    Credit().insert_detail(category=5, target=articleid, credit=(-1) * result.credit)
    # Deduct the cost from the user's remaining credit balance.
    Users().update_credit((-1) * result.credit)
    return content_half