0.0.1 init, main page prepare
2
BaseModels/search_optimization/OEMBED/OEMBED_data.py
Normal file
@@ -0,0 +1,2 @@
# https://oembed.com/
# https://habr.com/ru/post/141303/
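A minimal sketch of what this stub points at (provider names and values are illustrative, not part of the commit): an oEmbed consumer discovers the endpoint through a <link> tag and receives a JSON payload like the one below, per https://oembed.com/.

# <link rel="alternate" type="application/json+oembed"
#       href="https://provider.example/oembed?url=https%3A%2F%2Fprovider.example%2Fvideo%2F42&format=json">
#
# {
#     "version": "1.0",
#     "type": "video",
#     "title": "Example video",
#     "provider_name": "Example Provider",
#     "width": 640,
#     "height": 360,
#     "html": "<iframe src='https://provider.example/embed/42' width='640' height='360'></iframe>"
# }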
1
BaseModels/search_optimization/RSS/ya_RSS_chanel.py
Normal file
@@ -0,0 +1 @@
# https://yandex.ru/dev/turbo/doc/quick-start/articles.html
0
BaseModels/search_optimization/__init__.py
Normal file
2
BaseModels/search_optimization/google_AMP/AMP.py
Normal file
@@ -0,0 +1,2 @@
# https://amp.dev/ru/
# https://www.seonews.ru/analytics/optimization-2020-vnedrenie-amp-dlya-internet-magazina-bez-poteri-konversii-v-google/
@@ -0,0 +1 @@
# https://developers.google.com/search/docs/advanced/appearance/enable-web-stories?hl=ru#google-discover
1
BaseModels/search_optimization/google_tips
Normal file
@@ -0,0 +1 @@
https://developers.google.com/search/docs/beginner/seo-starter-guide?hl=ru#understand_your_content
0
BaseModels/search_optimization/ld_json/__init__.py
Normal file
38
BaseModels/search_optimization/ld_json/ld_ speakebale.py
Normal file
@@ -0,0 +1,38 @@
def get_ld_speakebale(name, theme_xpath, info_xpath, url):

    data = {
        "@context": "https://schema.org/",
        "@type": "WebPage",
        "name": name,
        "speakable": {
            "@type": "SpeakableSpecification",
            "xPath": [
                theme_xpath,
                info_xpath
            ]
        },
        "url": url
    }

    return data


# <title>Speakable markup example</title>
# <meta name="description" content="This page is all about the quick brown fox" />
# <script type="application/ld+json">
# {
#   "@context": "https://schema.org/",
#   "@type": "WebPage",
#   "name": "Quick Brown Fox",
#   "speakable":
#   {
#     "@type": "SpeakableSpecification",
#     "xPath": [
#       "/html/head/title",
#       "/html/head/meta[@name='description']/@content"
#     ]
#   },
#   "url": "http://www.quickbrownfox_example.com/quick-brown-fox"
# }
# </script>
22
BaseModels/search_optimization/ld_json/ld_FAQ.py
Normal file
@@ -0,0 +1,22 @@
def get_ld_FAQ(data_Dict):

    FAQ_list = []
    for key, val in data_Dict.items():
        FAQ_list.append({
            "@type": "Question",
            "name": key,
            "acceptedAnswer": {
                "@type": "Answer",
                "text": val
            }
        })

    data = {
        "@context": "https://schema.org",
        "@type": "FAQPage",
        "mainEntity": FAQ_list
    }

    return data
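A minimal usage sketch (the questions and the template line are illustrative, not part of the commit): the function returns a plain dict, so the caller serializes it before embedding it in the page.

import json

from BaseModels.search_optimization.ld_json.ld_FAQ import get_ld_FAQ

ld_faq = json.dumps(get_ld_FAQ({
    "How long is delivery?": "Usually 1-3 business days.",
    "Can I pay on delivery?": "Yes, cash and card are accepted.",
}))
# in a Django template: <script type="application/ld+json">{{ ld_faq|safe }}</script>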
36
BaseModels/search_optimization/ld_json/ld_QA.py
Normal file
@@ -0,0 +1,36 @@
def get_ld_QA(data_Dict):
    # NOTE: data_Dict is not used yet; the function currently returns a
    # hard-coded example QAPage payload.

    data = {
        "@context": "https://schema.org",
        "@type": "QAPage",
        "mainEntity": {
            "@type": "Question",
            "name": "How many ounces are there in a pound?",
            "text": "I have taken up a new interest in baking and keep running across directions in ounces and pounds. I have to translate between them and was wondering how many ounces are in a pound?",
            "answerCount": 3,
            "upvoteCount": 26,
            "acceptedAnswer": {
                "@type": "Answer",
                "text": "1 pound (lb) is equal to 16 ounces (oz).",
                "upvoteCount": 1337,
                "url": "https://example.com/question1#acceptedAnswer"
            },
            "suggestedAnswer": [
                {
                    "@type": "Answer",
                    "text": "Are you looking for ounces or fluid ounces? If you are looking for fluid ounces there are 15.34 fluid ounces in a pound of water.",
                    "upvoteCount": 42,
                    "url": "https://example.com/question1#suggestedAnswer1"
                }, {
                    "@type": "Answer",
                    "text": " I can't remember exactly, but I think 18 ounces in a lb. You might want to double check that.",
                    "upvoteCount": 0,
                    "url": "https://example.com/question1#suggestedAnswer2"
                }
            ]
        }
    }

    return data
41
BaseModels/search_optimization/ld_json/ld_article_news.py
Normal file
@@ -0,0 +1,41 @@
import json

import project_sets
from project_sets import *
from django.urls import reverse
from django.utils.html import strip_tags


def get_ld_article_news(art_name, art_txt, art_DT, url_data):
    from BaseModels.inter import get_all_photos_from_html_content

    img_list = get_all_photos_from_html_content(art_txt)
    if img_list:
        img_list = list(map(lambda img: "{0}{1}".format(project_sets.domain, img), img_list))

    data = {
        "@context": "https://schema.org",
        "@type": "NewsArticle",
        "url": "{0}{1}".format(project_sets.domain, reverse(**url_data)),
        "publisher": {
            "@type": "Organization",
            "name": project_sets.company_name,
            "logo": project_sets.logo
        },
        "author": {
            "@type": "Organization",
            "name": project_sets.company_name,
            "logo": project_sets.logo,
            "url": project_sets.domain,
        },
        "headline": art_name,
        # "mainEntityOfPage": "http://www.bbc.com/news/world-us-canada-39324587",  # link to the source page
        "articleBody": strip_tags(art_txt),
        "datePublished": art_DT.isoformat()
    }
    if img_list:
        data.update({
            'image': img_list
        })

    return json.dumps(data)
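A minimal usage sketch (the article fields and the URL name are assumptions, not part of the commit): url_data is unpacked into Django's reverse(), so it carries the view name and its kwargs.

from BaseModels.search_optimization.ld_json.ld_article_news import get_ld_article_news

ld_article = get_ld_article_news(
    art_name=article.name,                     # hypothetical Article instance
    art_txt=article.content,
    art_DT=article.date_created,
    url_data={'viewname': 'article_detail', 'kwargs': {'slug': article.slug}},
)
# returns a JSON string ready for a <script type="application/ld+json"> tag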
39
BaseModels/search_optimization/ld_json/ld_breadcrambs.py
Normal file
@@ -0,0 +1,39 @@
import json


def get_ld_breadcrambs(items_list):

    elements_list = []
    for i, item in enumerate(items_list, start=1):
        url = None
        if isinstance(item, str):
            name = item
        elif isinstance(item, dict):
            name = item['name']
            url = item['url']
        else:
            name = item.name
            url = item.url

        Dict = {
            "@type": "ListItem",
            "position": i,
            "name": name,
        }
        # the last breadcrumb is emitted without an "item" URL
        if i < len(items_list):
            Dict.update({
                "item": url
            })

        elements_list.append(Dict)

    data = {
        "@context": "https://schema.org",
        "@type": "BreadcrumbList",
        "itemListElement": elements_list
    }

    return json.dumps(data)
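A minimal usage sketch (the page names and URLs are illustrative, not part of the commit): items may be plain strings, dicts, or objects with .name/.url; the last item gets no URL.

from BaseModels.search_optimization.ld_json.ld_breadcrambs import get_ld_breadcrambs

ld_breadcrumbs = get_ld_breadcrambs([
    {'name': 'Home', 'url': 'https://example.com/'},
    {'name': 'News', 'url': 'https://example.com/news/'},
    'Current article',   # last crumb: name only
])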
243
BaseModels/search_optimization/ld_json/ld_company.py
Normal file
@@ -0,0 +1,243 @@
import json

import project_sets
from collections import OrderedDict


def get_ld_logo():
    data = {
        "@context": "https://schema.org",
        "@type": "Organization",
        "url": project_sets.domain,
        "logo": project_sets.logo
    }
    return data


def get_ld_company(offices):
    try:
        main_office = offices.get(main_office=True)
    except Exception:
        main_office = offices[0]

    data = {
        "@context": "https://schema.org",
        "@type": "LocalBusiness",
        "logo": project_sets.logo,
    }

    ld_for_main_office = get_ld_office(main_office)
    data.update(ld_for_main_office)

    departments = []
    for office in offices:
        if office == main_office:
            continue

        departments.append(get_ld_office(office))

    # if departments:
    #     data.update({
    #         'department': departments
    #     })

    return json.dumps(data)


def get_ld_office(office):

    try:
        phones = office.phones()
    except Exception:
        phones = []

    if not phones:
        try:
            phones = office.rel_contacts_for_office
        except Exception:
            phones = []

    data = {
        "name": office.name,
    }

    # Every page (with or without markup) must contain at least one image. Googlebot picks the best
    # image for search results based on aspect ratio and resolution.
    # Image URLs must be crawlable and indexable; whether Googlebot can reach your content URLs can be
    # checked with the tool described in the linked article.
    # Images must match the marked-up content.
    # Only image formats supported by Google Images are allowed.
    # Provide several high-resolution images (at least 50,000 pixels as width times height) with the
    # following aspect ratios: 16:9, 4:3 and 1:1.
    data.update({
        "image": [
            project_sets.logo,
        ]
    })

    # data.update({
    #     "@type": "Store",
    # })
    # optional!

    # AnimalShelter
    # ArchiveOrganization
    # AutomotiveBusiness
    # ChildCare
    # Dentist
    # DryCleaningOrLaundry
    # EmergencyService
    # EmploymentAgency
    # EntertainmentBusiness
    # FinancialService
    # FoodEstablishment
    # GovernmentOffice
    # HealthAndBeautyBusiness
    # HomeAndConstructionBusiness
    # InternetCafe
    # LegalService
    # Library
    # LodgingBusiness
    # MedicalBusiness
    # ProfessionalService
    # RadioStation
    # RealEstateAgent
    # RecyclingCenter
    # SelfStorage
    # ShoppingCenter
    # SportsActivityLocation
    # Store
    # TelevisionStation
    # TouristInformationCenter
    # TravelAgency

    i_Dict = {
        "address": {
            "@type": "PostalAddress",
            "streetAddress": office.address,
            "addressLocality": office.city,
            # "addressRegion": "CA",
            # "postalCode": "95129",
            # "addressCountry": "US"
        },
    }
    if phones:
        i_Dict["address"].update({
            "telephone": '{0}{1}'.format(phones[0].prefix, phones[0].nomber_phone),
        })

    data.update(i_Dict)

    gps_longitude = getattr(office, 'gps_longitude', None)
    gps_latitude = getattr(office, 'gps_latitude', None)
    if not gps_longitude:
        gps_longitude = getattr(project_sets, 'gps_longitude', None)
    if not gps_latitude:
        gps_latitude = getattr(project_sets, 'gps_latitude', None)
    if gps_longitude and gps_latitude:
        i_Dict = {
            "geo": {
                "@type": "GeoCoordinates",
                "latitude": gps_latitude,
                "longitude": gps_longitude
            },
        }
        data.update(i_Dict)

    data.update({
        "url": project_sets.domain
    })

    # "foundingDate": "2005-02-07",  # founding date

    company_reference_links = getattr(project_sets, 'company_reference_links', None)
    if company_reference_links:
        data.update({
            "sameAs": company_reference_links
        })

    priceRange = getattr(office, 'priceRange', '$')
    if priceRange:
        data.update({
            "priceRange": priceRange
        })

    work_time_from = getattr(office, 'work_time_from', None)
    if not work_time_from:
        work_time_from = getattr(project_sets, 'work_time_from', '9:00')
    work_time_to = getattr(office, 'work_time_to', None)
    if not work_time_to:
        work_time_to = getattr(project_sets, 'work_time_to', '18:00')

    i_Dict = {
        "openingHoursSpecification": [
            {
                "@type": "OpeningHoursSpecification",
                "dayOfWeek": [
                    "https://schema.org/Monday",
                    "https://schema.org/Tuesday",
                    "https://schema.org/Wednesday",
                    "https://schema.org/Thursday",
                    "https://schema.org/Friday",
                    # "https://schema.org/Saturday"
                ],
                "opens": work_time_from,
                "closes": work_time_to
            },
            # {
            #     "@type": "OpeningHoursSpecification",
            #     "dayOfWeek": "Sunday",
            #     "opens": "08:00",
            #     "closes": "23:00"
            # }
        ],
    }
    # i_Dict = {
    #     "openingHoursSpecification": [
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Monday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Tuesday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Wednesday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Thursday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Friday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Saturday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #         {
    #             "@type": "OpeningHoursSpecification",
    #             "dayOfWeek": "https://schema.org/Sunday",
    #             "opens": work_time_from,
    #             "closes": work_time_to
    #         },
    #     ],
    # }
    data.update(i_Dict)

    return data
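A minimal usage sketch (the Office model and its fields are assumptions, not part of the commit): offices is expected to behave like a Django queryset with a main_office flag, and each office to expose name, address, city and, optionally, phones and working hours.

from BaseModels.search_optimization.ld_json.ld_company import get_ld_company

ld_company = get_ld_company(Office.objects.all())   # hypothetical Office model
# returns a JSON string for a <script type="application/ld+json"> tag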
@@ -0,0 +1,53 @@
import json
import re

import project_sets
from django.urls import reverse
from django.utils.html import strip_tags


def create_videoobject(video_path, name, description, DT):
    video_id = video_path.split('/')[-1]
    thumbs = list(map(lambda s: "https://img.youtube.com/vi/{0}/{1}.jpg".format(video_id, str(s)), range(1, 5)))

    data = {
        "@context": "https://schema.org",
        "@type": "VideoObject",
        "name": name,
        "description": description,
        "thumbnailUrl": thumbs,
        "uploadDate": DT.isoformat(),
        # "duration": "PT1M54S",  # video duration
        # "contentUrl": "https://www.example.com/video/123/file.mp4",  # URL of the video file itself
        "embedUrl": video_path,
        # "interactionStatistic": {  # view count
        #     "@type": "InteractionCounter",
        #     "interactionType": { "@type": "WatchAction" },
        #     "userInteractionCount": 5647018
        # },
        # "regionsAllowed": "US,NL"  # allowed regions
    }
    return data


def get_ld_videoobjects_for_page_html(obj, name, description, DT, content):
    from BaseModels.inter import get_all_videos_from_html_content
    res_list = []

    if obj.video:
        data = create_videoobject(obj.video, name, description, DT)
        res_list.append(json.dumps(data))

    if not content:
        return res_list

    videos_list = get_all_videos_from_html_content(content)
    # if videos_list:
    #     img_list = list(map(lambda img: "{0}{1}".format(project_sets.domain, img), videos_list))

    for video_path in videos_list:
        # skip the video already covered by obj.video
        if not obj.video or (video_path not in obj.video and obj.video not in video_path):
            data = create_videoobject(video_path, name, description, DT)
            res_list.append(json.dumps(data))

    return res_list
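A minimal usage sketch (the article object and its fields are assumptions, not part of the commit): obj needs a .video attribute holding the embed URL of the main video; additional videos are picked out of the HTML content.

ld_videos = get_ld_videoobjects_for_page_html(
    obj=article,                       # hypothetical object with a .video embed URL
    name=article.name,
    description=article.short_description,
    DT=article.date_created,
    content=article.content,           # HTML body scanned for additional embeds
)
# returns a list of JSON strings, one <script type="application/ld+json"> per video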
178
BaseModels/search_optimization/ld_json/ld_product.py
Normal file
@@ -0,0 +1,178 @@
# import json
#
# import project_sets
# from BaseModels.functions import add_domain
#
#
# def get_ld_shipping_data_for_product(shipping_terms):
#     shipping_terms_list = []
#     for item in shipping_terms:
#         data = {
#             "@type": "OfferShippingDetails",
#             "shippingRate": {
#                 "@type": "MonetaryAmount",
#                 "value": item.price,
#                 "currency": project_sets.base_currency
#             },
#             "shippingDestination": {
#                 "@type": "DefinedRegion",
#                 "addressCountry": project_sets.shipping_region,  # required
#                 # "postalCodeRange": {
#                 #     "postalCodeBegin": "98100",
#                 #     "postalCodeEnd": "98199"
#                 # }
#             },
#             "deliveryTime": {
#                 "@type": "ShippingDeliveryTime",
#                 "cutOffTime": project_sets.cutOffTime,  # "19:30-08:00",
#
#                 # Standard time from payment received until the goods leave the warehouse
#                 # (or are ready for pickup, if that option is offered)
#                 "handlingTime": {
#                     "@type": "QuantitativeValue",
#                     "minValue": "0",  # days
#                     "maxValue": "1"   # days
#                 },
#                 # Standard time from dispatch of the order until it reaches the end customer.
#                 "transitTime": {
#                     "@type": "QuantitativeValue",
#                     "minValue": "1",  # days
#                     "maxValue": "5"   # days
#                 },
#                 # Time of day after which new orders are no longer processed the same day
#
#                 # Days of the week on which orders are processed
#                 "businessDays": {
#                     "@type": "OpeningHoursSpecification",
#                     "dayOfWeek": ["https://schema.org/Monday", "https://schema.org/Tuesday",
#                                   "https://schema.org/Wednesday", "https://schema.org/Thursday"]
#                 }
#             }
#         }
#
#         shipping_terms_list.append(data)
#
#     data = {
#         "shippingDetails": shipping_terms_list
#     }
#
#     return data
#
#
# def get_ld_offers_for_product(product, domain, shipping_terms):
#     data = {
#         "offers": {
#             "@type": "Offer",
#             "url": '{0}{1}'.format(domain, product.get_site_url()),
#             "itemCondition": "https://schema.org/NewCondition",
#             # "https://schema.org/NewCondition"
#             # "https://schema.org/UsedCondition"
#             "availability": "https://schema.org/InStock",
#             # https://schema.org/BackOrder
#             # https://schema.org/Discontinued
#             # https://schema.org/InStock
#             # https://schema.org/InStoreOnly
#             # https://schema.org/LimitedAvailability
#             # https://schema.org/OnlineOnly
#             # https://schema.org/OutOfStock
#             # https://schema.org/PreOrder
#             # https://schema.org/PreSale
#             # https://schema.org/SoldOut
#             "price": str(product.price),
#             "priceCurrency": project_sets.base_currency,
#             # "priceValidUntil": "2020-11-20",  # date until which the price is valid
#             # "shippingSettingsLink": '{0}{1}'.format(project_sets.domain, 'delivery/'),
#
#         },
#     }
#
#     if shipping_terms:
#         data["offers"].update(get_ld_shipping_data_for_product(shipping_terms))
#
#     return data
#
#
# def get_aggregate_rating(product):
#     data = {
#         # "review": {
#         #     "@type": "Review",
#         #     "reviewRating": {
#         #         "@type": "Rating",
#         #         "ratingValue": "4",
#         #         "bestRating": "5"
#         #     },
#         #     "author": {
#         #         "@type": "Person",
#         #         "name": "Fred Benson"
#         #     }
#         # },
#         "aggregateRating": {
#             "@type": "AggregateRating",
#             "ratingValue": product.ratingValue,
#             "reviewCount": product.reviewCount
#         }
#     }
#
#     return data
#
#
# def get_ld_product(product, domain, shipping_terms):
#     from GeneralApp.views import get_cur_domain
#     serv_domain, local_domain = get_cur_domain()
#
#     data = {
#         "@context": "https://schema.org/",
#         "@type": "Product",
#         "name": product.name,
#         "sku": '{0}-{1}'.format(str(product.brand), str(product.article)),
#         "url": '{0}{1}'.format(domain, product.get_site_url()),
#     }
#
#     if product.description:
#         data.update({
#             "description": product.description,
#         })
#
#     barcode = getattr(product, 'barcode', None)
#     if barcode:
#         data.update({
#             "gtin14": barcode,
#         })
#
#     gallery = getattr(product, 'gallery', None)
#     if gallery:
#         try:
#             photos = gallery.get_photos()
#             photos = list(map(lambda ph: '{0}{1}'.format(serv_domain, ph), photos))
#         except Exception as e:
#             photos = None
#
#         if photos:
#             data.update({
#                 "image": photos,
#             })
#
#     brand = getattr(product, 'brand', None)
#     if brand:
#         if type(brand) not in [str]:
#             brand = brand.name
#
#         data.update({
#             "brand": {
#                 "@type": "Brand",
#                 "name": brand
#             },
#         })
#
#     FAQ = {}
#
#     from ...
#
#     aggregate_rating = getattr(product, 'ratingValue', None)
#     if aggregate_rating != None:
#         data.update(get_aggregate_rating(product))
#
#     price = getattr(product, 'price', None)
#     if price:
#         data.update(get_ld_offers_for_product(product, domain, shipping_terms))
#
#     return json.dumps(data)
22
BaseModels/search_optimization/ld_json/ld_search.py
Normal file
@@ -0,0 +1,22 @@
import json
import project_sets


def get_ld_search(domain):

    # For the home page only

    data = {
        "@context": "https://schema.org",
        "@type": "WebSite",
        "url": domain,  # "https://truenergy.by/",
        "potentialAction": {
            "@type": "SearchAction",
            "target": {
                "@type": "EntryPoint",
                "urlTemplate": "{domain}/{search_term_string}/".format(domain=domain, search_term_string='{search_term_string}')
            },
            "query-input": "required name=search_term_string"
        }
    }

    return json.dumps(data)
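A minimal usage sketch (the template line is illustrative, not part of the commit): the WebSite/SearchAction markup is meant for the home page view only.

import project_sets

from BaseModels.search_optimization.ld_json.ld_search import get_ld_search

ld_search = get_ld_search(project_sets.domain)
# in the home page template: <script type="application/ld+json">{{ ld_search|safe }}</script>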
140
BaseModels/search_optimization/ld_json/ld_vacancy.py
Normal file
@@ -0,0 +1,140 @@
import datetime
import project_sets


def get_ld_vacancies(data_Dict):

    # JobPosting markup may only be placed on pages that contain a single job posting.
    # It is not allowed to add JobPosting markup to any other pages, including pages with no job postings at all.

    vacancies_list = []

    for item in data_Dict:
        data = {
            "@context": "https://schema.org/",
            "@type": "JobPosting",
            "title": item['title'],
            "description": item['description'],
            "datePosted": datetime.datetime.now().strftime('%Y-%m-%d'),
            "validThrough": item['validThrough'].strftime('%Y-%m-%dT%H:%M'),  # "2017-03-18T00:00", posting expiry date
            "identifier": {
                "@type": "PropertyValue",
                "name": project_sets.company_name,
                "value": str(item['id'])
            },
            "hiringOrganization": {
                "@type": "Organization",
                "name": project_sets.company_name,
                "sameAs": project_sets.domain,
                "logo": project_sets.logo
            },
        }

        if 'office' in item:
            # Used to specify the place where the employee will do the work. If there is no specific
            # location (such as an office or production site), this property is optional.
            job_place_Dict = {
                "jobLocation": {
                    "@type": "Place",
                    "address": {
                        "@type": "PostalAddress",
                        "streetAddress": item['office'].address,
                        "addressLocality": item['office'].city,
                        "addressCountry": "BY"
                    },
                },
            }
        else:
            job_place_Dict = {
                "jobLocationType": "TELECOMMUTE"  # remote work only
            }
        data.update(job_place_Dict)

        if 'required_country_of_residence' in item:
            # Used to specify the territory where a candidate for the position may live.
            # At least one country must be given.
            required_country_of_residence = {
                "applicantLocationRequirements": {
                    "@type": "Country",
                    "name": item['required_country_of_residence']['country']
                },
            }
            data.update(required_country_of_residence)

        if 'salary' in item:
            salary_Dict = {
                "baseSalary": {
                    "@type": "MonetaryAmount",
                    "currency": item['salary']['currency'],
                    "value": {
                        "@type": "QuantitativeValue",
                        "unitText": item['salary']['time_unit']
                        # HOUR
                        # DAY
                        # WEEK
                        # MONTH
                        # YEAR
                    }
                }
            }
            if 'price' in item['salary']:
                salary_Dict['baseSalary']['value']['value'] = item['salary']['price']
            elif 'price_from' in item['salary']:
                salary_Dict['baseSalary']['value']['minValue'] = item['salary']['price_from']

                if 'price_to' in item['salary']:
                    salary_Dict['baseSalary']['value']['maxValue'] = item['salary']['price_to']

            data.update(salary_Dict)

        # Indicates whether the job posting page supports submitting an application directly.
        data.update({
            'directApply': item['directApply']
        })

        # Employment type. Specify one or more values.
        if 'employmentType' in item:
            # FULL_TIME
            # PART_TIME
            # CONTRACTOR
            # TEMPORARY
            # INTERN
            # VOLUNTEER
            # PER_DIEM
            # OTHER
            data.update({
                'employmentType': item['employmentType']
            })

        if 'educationRequirements' in item:
            e_Dict = {
                "educationRequirements": {
                    "@type": "EducationalOccupationalCredential",
                    "credentialCategory": item['educationRequirements']
                    # high school
                    # associate degree
                    # bachelor degree
                    # professional certificate
                    # postgraduate degree
                },
            }
            data.update(e_Dict)

        if 'experienceRequirements' in item:
            e_Dict = {
                "experienceRequirements": {
                    "@type": "OccupationalExperienceRequirements",
                    "monthsOfExperience": item['experienceRequirements']  # work experience in months
                },
            }
            data.update(e_Dict)

        # When set to true, this property indicates that having experience is enough for candidates
        # who lack the required education.
        if 'required_only_experience' in item:
            if 'experienceRequirements' in item and 'educationRequirements' in item:
                data.update({
                    'experienceInPlaceOfEducation': item['required_only_experience']
                })

        vacancies_list.append(data)

    return vacancies_list
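A minimal usage sketch (the field values are illustrative, not part of the commit): each item supplies the keys the function reads; 'office' and 'salary' are optional, 'directApply' is always expected.

import datetime

from BaseModels.search_optimization.ld_json.ld_vacancy import get_ld_vacancies

vacancies_ld = get_ld_vacancies([{
    'id': 7,
    'title': 'Python developer',
    'description': '<p>Backend work on the company site.</p>',
    'validThrough': datetime.datetime(2021, 12, 31, 18, 0),
    'directApply': True,
    'employmentType': 'FULL_TIME',
    'salary': {'currency': 'BYN', 'time_unit': 'MONTH', 'price_from': 2000, 'price_to': 3000},
}])
# returns a list of dicts, one JobPosting per vacancy, ready for json.dumps()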
1
BaseModels/search_optimization/ya_YML/ya_YML.py
Normal file
@@ -0,0 +1 @@
# https://yandex.ru/dev/turbo-shop/doc/quick-start/markets.html
1
BaseModels/search_optimization/ya_tips
Normal file
@@ -0,0 +1 @@
https://yandex.ru/support/webmaster/index.html