উইকিঅভিধান
bnwiktionary
https://bn.wiktionary.org/wiki/%E0%A6%AA%E0%A7%8D%E0%A6%B0%E0%A6%A7%E0%A6%BE%E0%A6%A8_%E0%A6%AA%E0%A6%BE%E0%A6%A4%E0%A6%BE
MediaWiki 1.46.0-wmf.23
case-sensitive
মিডিয়া
বিশেষ
আলাপ
ব্যবহারকারী
ব্যবহারকারী আলাপ
উইকিঅভিধান
উইকিঅভিধান আলোচনা
চিত্র
চিত্র আলোচনা
মিডিয়াউইকি
মিডিয়াউইকি আলোচনা
টেমপ্লেট
টেমপ্লেট আলোচনা
সাহায্য
সাহায্য আলোচনা
বিষয়শ্রেণী
বিষয়শ্রেণী আলোচনা
পরিশিষ্ট
পরিশিষ্ট আলোচনা
ছন্দ
ছন্দ আলোচনা
থিসরাস
থিসরাস আলোচনা
উদ্ধৃতি
উদ্ধৃতি আলোচনা
TimedText
TimedText talk
মডিউল
মডিউল আলাপ
Event
Event talk
মডিউল:links
828
6287
507788
323746
2026-04-14T06:43:51Z
Redmin
6857
[[en:Module:links|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507788
Scribunto
text/plain
local export = {}
--[=[
[[Unsupported titles]], pages with high memory usage,
extraction modules and part-of-speech names are listed
at [[Module:links/data]].
Other modules used:
[[Module:script utilities]]
[[Module:scripts]]
[[Module:languages]] and its submodules
[[Module:gender and number]]
[[Module:debug/track]]
]=]
-- Names of the modules loaded on demand by the lazy-loader stubs below,
-- gathered here so the module's dependencies are visible in one place.
local anchors_module = "Module:anchors"
local debug_track_module = "Module:debug/track"
local form_of_module = "Module:form of"
local gender_and_number_module = "Module:gender and number"
local languages_module = "Module:languages"
local load_module = "Module:load"
local memoize_module = "Module:memoize"
local pages_module = "Module:pages"
local pron_qualifier_module = "Module:pron qualifier"
local scripts_module = "Module:scripts"
local script_utilities_module = "Module:script utilities"
local string_encode_entities_module = "Module:string/encode entities"
local string_utilities_module = "Module:string utilities"
local table_module = "Module:table"
local utilities_module = "Module:utilities"
-- Localized copies of frequently-used globals and library functions:
-- locals are cheaper to access than repeated global table lookups.
local concat = table.concat
local find = string.find
local get_current_title = mw.title.getCurrentTitle
local insert = table.insert
local ipairs = ipairs
local match = string.match
local new_title = mw.title.new
local pairs = pairs
local remove = table.remove
local sub = string.sub
local toNFC = mw.ustring.toNFC
local tostring = tostring
local type = type
local unstrip = mw.text.unstrip
-- Namespace name of the page currently being rendered (e.g. "" for mainspace).
local NAMESPACE = get_current_title().nsText
--[=[
Lazy loaders: each stub below `require`s its target module only on first call,
then overwrites itself (the local upvalue) with the real function, so that all
subsequent calls go straight to the loaded implementation. This avoids loading
modules on code paths that never need them. The pattern must be kept exactly
as-is: each function assigns to its own name before delegating.
]=]
-- anchorEncode, memoized (second argument `true` per [[Module:memoize]]).
local function anchor_encode(...)
anchor_encode = require(memoize_module)(mw.uri.anchorEncode, true)
return anchor_encode(...)
end
local function debug_track(...)
debug_track = require(debug_track_module)
return debug_track(...)
end
local function decode_entities(...)
decode_entities = require(string_utilities_module).decode_entities
return decode_entities(...)
end
local function decode_uri(...)
decode_uri = require(string_utilities_module).decode_uri
return decode_uri(...)
end
-- Can't yet replace, as the [[Module:string utilities]] version no longer has automatic double-encoding prevention, which requires changes here to account for.
local function encode_entities(...)
encode_entities = require(string_encode_entities_module)
return encode_entities(...)
end
local function extend(...)
extend = require(table_module).extend
return extend(...)
end
local function find_best_script_without_lang(...)
find_best_script_without_lang = require(scripts_module).findBestScriptWithoutLang
return find_best_script_without_lang(...)
end
local function format_categories(...)
format_categories = require(utilities_module).format_categories
return format_categories(...)
end
local function format_genders(...)
format_genders = require(gender_and_number_module).format_genders
return format_genders(...)
end
local function format_qualifiers(...)
format_qualifiers = require(pron_qualifier_module).format_qualifiers
return format_qualifiers(...)
end
local function get_current_L2(...)
get_current_L2 = require(pages_module).get_current_L2
return get_current_L2(...)
end
local function get_lang(...)
get_lang = require(languages_module).getByCode
return get_lang(...)
end
local function get_script(...)
get_script = require(scripts_module).getByCode
return get_script(...)
end
local function language_anchor(...)
language_anchor = require(anchors_module).language_anchor
return language_anchor(...)
end
local function load_data(...)
load_data = require(load_module).load_data
return load_data(...)
end
local function request_script(...)
request_script = require(script_utilities_module).request_script
return request_script(...)
end
local function shallow_copy(...)
shallow_copy = require(table_module).shallowCopy
return shallow_copy(...)
end
local function split(...)
split = require(string_utilities_module).split
return split(...)
end
local function tag_text(...)
tag_text = require(script_utilities_module).tag_text
return tag_text(...)
end
local function tag_translit(...)
tag_translit = require(script_utilities_module).tag_translit
return tag_translit(...)
end
local function trim(...)
trim = require(string_utilities_module).trim
return trim(...)
end
local function u(...)
u = require(string_utilities_module).char
return u(...)
end
local function ulower(...)
ulower = require(string_utilities_module).lower
return ulower(...)
end
local function umatch(...)
umatch = require(string_utilities_module).match
return umatch(...)
end
-- Cache for [[Module:headword/data]]. Call sites use the idiom
-- `(m_headword_data or get_headword_data())` so the data module is only
-- loaded the first time it is actually needed.
local m_headword_data
local function get_headword_data()
m_headword_data = load_data("Module:headword/data")
return m_headword_data
end
-- Record a hit on the tracking page "links/`page`", plus the subpage
-- "links/`page`/`code`" when `code` is given.
local function track(page, code)
	local base = "links/" .. page
	debug_track(base)
	if code then
		debug_track(base .. "/" .. code)
	end
end
-- Trim unwanted characters from both ends of `text`. Whitespace and
-- zero-width spaces are only trimmed when something non-trimmable remains;
-- C1 controls, soft hyphens and directionality marks are always trimmed.
-- Like the lazy loaders above, this builds its charsets once on first call
-- and then replaces itself with the inner closure.
local function selective_trim(...)
-- Unconditionally trimmed charset.
local always_trim =
"\194\128-\194\159" .. -- U+0080-009F (C1 control characters)
"\194\173" .. -- U+00AD (soft hyphen)
"\226\128\170-\226\128\174" .. -- U+202A-202E (directionality formatting characters)
"\226\129\166-\226\129\169" -- U+2066-2069 (directionality formatting characters)
-- Standard trimmed charset.
local standard_trim = "%s" .. -- (default whitespace charset)
"\226\128\139-\226\128\141" .. -- U+200B-200D (zero-width spaces)
always_trim
-- If there are non-whitespace characters, trim all characters in `standard_trim`.
-- Otherwise, only trim the characters in `always_trim`.
selective_trim = function(text)
if text == "" then
return text
end
local trimmed = trim(text, standard_trim)
if trimmed ~= "" then
return trimmed
end
return trim(text, always_trim)
end
return selective_trim(...)
end
-- Hide escaped occurrences of `str` behind control-character placeholders so
-- later pattern operations cannot see them: a backslash pair guarding `str`
-- collapses to \5 (a protected backslash), and "\" .. str itself becomes \6.
-- Reversed by unescape().
local function escape(text, str)
	local count
	repeat
		text, count = text:gsub("\\\\(\\*" .. str .. ")", "\5%1")
	until count == 0
	text = text:gsub("\\" .. str, "\6")
	return text
end
-- Undo escape(): \5 placeholders become literal backslashes again, and \6
-- becomes `str`.
local function unescape(text, str)
	text = text:gsub("\5", "\\")
	return (text:gsub("\6", str))
end
-- Remove bold, italics, soft hyphens, strip markers and HTML tags.
-- `str` is wikitext; returns the cleaned string (gsub's count is discarded
-- via the parenthesized return).
local function remove_formatting(str)
	str = str
		:gsub("('*)'''(.-'*)'''", "%1%2")
		:gsub("('*)''(.-'*)''", "%1%2")
		-- U+00AD (soft hyphen), written as an explicit byte escape: as an
		-- invisible literal character it is easily lost or turned into an
		-- empty pattern (which would silently make this gsub a no-op).
		:gsub("\194\173", "")
	return (unstrip(str)
		:gsub("<[^<>]+>", ""))
end
--[==[Takes an input and splits on a double slash (taking account of escaping backslashes). Empty segments become {false}.]==]
function export.split_on_slashes(text)
	if text:find("\\", nil, true) then
		track("escaped", "split_on_slashes")
	end
	-- Protect escaped slashes before splitting, then restore them per segment.
	local parts = split(escape(text, "//"), "//", true) or {}
	for i = 1, #parts do
		local part = parts[i]
		parts[i] = part == "" and false or unescape(part, "//")
	end
	return parts
end
--[==[Takes a wikilink and outputs the link target and display text. By default, the link target will be returned as a title object, but if `allow_bad_target` is set it will be returned as a string, and no check will be performed as to whether it is a valid link target.]==]
function export.get_wikilink_parts(text, allow_bad_target)
	-- TODO: replace `allow_bad_target` with `allow_unsupported`, with support for links to unsupported titles, including escape sequences.
	-- Accept only a single "[[...]]" spanning the entire input, with no
	-- stray "[[" or "]]" in between. (A nil result from the second find
	-- compares unequal to the length check and is rejected too.)
	if sub(text, 1, 2) ~= "[[" or
		find(text, "[[", 3, true) or
		find(text, "]]", 3, true) ~= #text - 1
	then
		return nil, nil
	end
	-- Split the interior on the first pipe, if any.
	local inner = sub(text, 3, -3)
	local title, display
	local pipe = find(inner, "|", 1, true)
	if pipe then
		title = sub(inner, 1, pipe - 1)
		display = sub(inner, pipe + 1)
	else
		title, display = inner, inner
	end
	if allow_bad_target then
		return title, display
	end
	title = new_title(title)
	if title == nil then
		-- No title object means the target is invalid.
		return nil, nil
	elseif title.prefixedText == "" then
		-- A target starting with "#" yields a broken title object from
		-- mw.title.new, so use the current title with the requested fragment.
		local fragment = title.fragment
		if fragment == "" then -- [[#]] isn't valid
			return nil, nil
		end
		title = get_current_title()
		title.fragment = fragment
	end
	return title, display
end
-- Core of export.get_fragment; callable directly when the caller has already
-- ruled out embedded links. Splits `text` into target and optional fragment
-- at the first unescaped "#" (escapable as \#).
local function get_fragment(text)
	text = escape(text, "#")
	-- Decode numeric character references first (e.g. &#39; -> '): the "#"
	-- inside them would otherwise be misparsed as a fragment separator
	-- (wa&#39;a -> pagename wa&, fragment 39;a).
	text = decode_entities(text)
	local target, fragment = text:match("^(.-)#(.+)$")
	if target then
		return unescape(target, "#"), unescape(fragment, "#")
	end
	return unescape(text, "#"), nil
end
--[==[Takes a link target and outputs the actual target and the fragment (if any).]==]
function export.get_fragment(text)
	if text:find("\\", nil, true) then
		track("escaped", "get_fragment")
	end
	local open = find(text, "[[", nil, true)
	local close = open and find(text, "]]", open + 2, true)
	-- No complete embedded link: process the whole input.
	if not close then
		return get_fragment(text)
	end
	-- A single wikilink wrapping the whole input with no pipe is redundant:
	-- strip the brackets and process the contents.
	if open == 1 and close == #text - 1 and not find(text, "|", 3, true) then
		return get_fragment(sub(text, 3, -3))
	end
	-- The input genuinely contains embedded links; return it untouched.
	return text
end
--[==[
Given a link target as passed to `full_link()`, get the actual page that the target refers to. This removes
bold, italics, strip markers and HTML; calls `makeEntryName()` for the language in question; converts targets
beginning with `*` to the Reconstruction namespace; and converts appendix-constructed languages to the Appendix
namespace. Returns up to three values:
# the actual page to link to, or {nil} to not link to anything;
# how the target should be displayed as, if the user didn't explicitly specify any display text; generally the
same as the original target, but minus any anti-asterisk !!;
# the value `true` if the target had a backslash-escaped * in it (FIXME: explain this more clearly).
]==]
function export.get_link_page_with_auto_display(target, lang, sc, plain)
-- Keep the original input around for use as the default display text.
local orig_target = target
if not target then
return nil
elseif target:find("\\", nil, true) then
track("escaped", "get_link_page")
end
target = remove_formatting(target)
-- A leading ":" marks a "raw" link: skip entry-name processing entirely.
if target:sub(1, 1) == ":" then
track("initial colon")
-- FIXME, the auto_display (second return value) should probably remove the colon
return target:sub(2), orig_target
end
local prefix = target:match("^(.-):")
-- Convert any escaped colons
target = target:gsub("\\:", ":")
if prefix then
-- If this is a link to another namespace or an interwiki link, ensure there's an initial colon and then
-- return what we have (so that it works as a conventional link, and doesn't do anything weird like add the term
-- to a category.)
prefix = ulower(trim(prefix))
if prefix ~= "" and (
load_data("Module:data/namespaces")[prefix] or
load_data("Module:data/interwikis")[prefix]
) then
return target, orig_target
end
end
-- Check if the term is reconstructed and remove any asterisk. Also check for anti-asterisk (!!).
-- Otherwise, handle the escapes.
-- `reconstructed`/`anti_asterisk` hold gsub's replacement counts (0 or 1).
local reconstructed, escaped, anti_asterisk
if not plain then
target, reconstructed = target:gsub("^%*(.)", "%1")
if reconstructed == 0 then
target, anti_asterisk = target:gsub("^!!(.)", "%1")
if anti_asterisk == 1 then
-- Remove !! from original. FIXME! We do it this way because the call to remove_formatting() above
-- may cause non-initial !! to be interpreted as anti-asterisks. We should surely move the
-- remove_formatting() call later.
orig_target = orig_target:gsub("^!!", "")
end
end
end
-- Unescape a backslash-escaped asterisk; `escaped` counts whether one was found.
target, escaped = target:gsub("^(\\-)\\%*", "%1*")
-- Fall back to script detection when no usable script was supplied.
if not (sc and sc:getCode() ~= "None") then
sc = lang:findBestScript(target)
end
-- Remove carets if they are used to capitalize parts of transliterations (unless they have been escaped).
if (not sc:hasCapitalization()) and sc:isTransliterated() and target:match("%^") then
target = escape(target, "^")
:gsub("%^", "")
target = unescape(target, "^")
end
-- Get the entry name for the language.
target = lang:makeEntryName(target, sc, reconstructed == 1 or lang:hasType("appendix-constructed"))
-- If the link contains unexpanded template parameters, then don't create a link.
if target:match("{{{.-}}}") then
-- FIXME: Should we return the original target as the default display value (second return value)?
return nil
end
-- Link to appendix for reconstructed terms and terms in appendix-only languages. Plain links interpret *
-- literally, however.
if reconstructed == 1 then
if lang:getFullCode() == "und" then
-- Return the original target as default display value. If we don't do this, we wrongly get
-- [Term?] displayed instead.
return nil, orig_target
end
target = "Reconstruction:" .. lang:getFullName() .. "/" .. target
-- Reconstructed languages and substrates require an initial *.
elseif anti_asterisk ~= 1 and (lang:hasType("reconstructed") or lang:getFamilyCode() == "qfa-sub") then
error(("The specified language %s is unattested, while the term '%s' does not begin with '*' to indicate that it is reconstructed.")
:
format(lang:getCanonicalName(), orig_target))
elseif lang:hasType("appendix-constructed") then
target = "Appendix:" .. lang:getFullName() .. "/" .. target
else
target = target
end
return target, orig_target, escaped > 0
end
-- Like get_link_page_with_auto_display(), but returns only the page to link
-- to and the escaped-asterisk flag, dropping the auto-display value.
function export.get_link_page(target, lang, sc, plain)
	local page, _, escaped = export.get_link_page_with_auto_display(target, lang, sc, plain)
	return page, escaped
end
-- Make a link from a given link's parts.
-- `link` is a table with `target`, `display` and (optionally) `fragment`
-- fields; it is mutated in place. Returns the finished wikitext (or just the
-- display text when no target remains after processing). `isolated` wraps a
-- whitespace-only display in inward-facing brackets; `cats` collects
-- maintenance category names; `no_alt_ast` and `plain` mirror the flags of
-- the exported link functions.
local function make_link(link, lang, sc, id, isolated, cats, no_alt_ast, plain)
-- Convert percent encoding to plaintext.
link.target = link.target and decode_uri(link.target, "PATH")
link.fragment = link.fragment and decode_uri(link.fragment, "PATH")
-- Find fragments (if one isn't already set).
-- Prevents {{l|en|word#Etymology 2|word}} from linking to [[word#Etymology 2#English]].
-- # can be escaped as \#.
-- A `fragment` of false (set by parse_link) deliberately skips this step.
if link.target and link.fragment == nil then
link.target, link.fragment = get_fragment(link.target)
end
-- Process the target
local auto_display, escaped
link.target, auto_display, escaped = export.get_link_page_with_auto_display(link.target, lang, sc, plain)
-- Create a default display form.
-- If the target is "" then it's a link like [[#English]], which refers to the current page.
if auto_display == "" then
auto_display = (m_headword_data or get_headword_data()).pagename
end
-- If the display is the target and the reconstruction * has been escaped, remove the escaping backslash.
if escaped then
auto_display = auto_display:gsub("\\([^\\]*%*)", "%1", 1)
end
-- Process the display form.
if link.display then
local orig_display = link.display
link.display = lang:makeDisplayText(link.display, sc, true)
if cats then
auto_display = lang:makeDisplayText(auto_display, sc)
-- If the alt text is the same as what would have been automatically generated, then the alt parameter is redundant (e.g. {{l|en|foo|foo}}, {{l|en|w:foo|foo}}, but not {{l|en|w:foo|w:foo}}).
-- If they're different, but the alt text could have been entered as the term parameter without it affecting the target page, then the target parameter is redundant (e.g. {{l|ru|фу|фу́}}).
-- If `no_alt_ast` is true, use pcall to catch the error which will be thrown if this is a reconstructed lang and the alt text doesn't have *.
if link.display == auto_display then
insert(cats, lang:getFullName() .. " links with redundant alt parameters")
else
local ok, check
if no_alt_ast then
ok, check = pcall(export.get_link_page, orig_display, lang, sc, plain)
else
ok = true
check = export.get_link_page(orig_display, lang, sc, plain)
end
if ok and link.target == check then
insert(cats, lang:getFullName() .. " links with redundant target parameters")
end
end
end
else
link.display = lang:makeDisplayText(auto_display, sc)
end
-- Nothing to link to: return the bare display text.
if not link.target then
return link.display
end
-- If the target is the same as the current page, there is no sense id
-- and either the language code is "und" or the current L2 is the current
-- language then return a "self-link" like the software does.
if link.target == get_current_title().prefixedText then
local fragment, current_L2 = link.fragment, get_current_L2()
if (
fragment and fragment == current_L2 or
not (id or fragment) and (lang:getFullCode() == "und" or lang:getFullName() == current_L2)
) then
return tostring(mw.html.create("strong")
:addClass("selflink")
:wikitext(link.display))
end
end
-- Add fragment. Do not add a section link to "Undetermined", as such sections do not exist and are invalid.
-- TabbedLanguages handles links without a section by linking to the "last visited" section, but adding
-- "Undetermined" would break that feature. For localized prefixes that make syntax error, please use the
-- format: ["xyz"] = true.
local prefix = link.target:match("^:*([^:]+):")
prefix = prefix and ulower(prefix)
if prefix ~= "category" and not (prefix and load_data("Module:data/interwikis")[prefix]) then
if (link.fragment or link.target:sub(-1) == "#") and not plain then
track("fragment", lang:getFullCode())
if cats then
insert(cats, lang:getFullName() .. " links with manual fragments")
end
end
if not link.fragment then
if id then
link.fragment = lang:getFullCode() == "und" and anchor_encode(id) or language_anchor(lang, id)
elseif lang:getFullCode() ~= "und" and not (link.target:match("^Appendix:") or link.target:match("^Reconstruction:")) then
link.fragment = anchor_encode(lang:getFullName())
end
end
end
-- Put inward-facing square brackets around a link to isolated spacing character(s).
if isolated and #link.display > 0 and not umatch(decode_entities(link.display), "%S") then
link.display = "]" .. link.display .. "["
end
-- Entity-encode characters that would break wikilink syntax, preserving a
-- leading colon on the target.
link.target = link.target:gsub("^(:?)(.*)", function(m1, m2)
return m1 .. encode_entities(m2, "#%&+/:<=>@[\\]_{|}")
end)
link.fragment = link.fragment and encode_entities(remove_formatting(link.fragment), "#%&+/:<=>@[\\]_{|}")
return "[[" ..
link.target:gsub("^[^:]", ":%0") .. (link.fragment and "#" .. link.fragment or "") .. "|" .. link.display .. "]]"
end
-- Split raw wikilink text (the part between "[[" and "]]") into its parts.
-- Returns a table with `target`, `display` and `fragment` fields, or nil for
-- non-term links (categories, files, images).
local function parse_link(linktext)
	-- Split on the first pipe; the target must be at least one character.
	local target, display = linktext:match("^(..-)|(.+)$")
	if not target then
		target, display = linktext, linktext
	end
	-- There's no point in processing these, as they aren't real links.
	local lower_target = target:lower()
	for _, prefix in ipairs({ "category", "cat", "file", "image" }) do
		if lower_target:match("^" .. prefix .. ":") then
			return nil
		end
	end
	local link = { display = decode_entities(display) }
	link.target, link.fragment = get_fragment(target)
	-- Use false (not nil) so make_link does not look for a fragment again.
	if link.fragment == nil then
		link.fragment = false
	end
	return link
end
-- When the term contains embedded wikilinks, the `alt` and `id` parameters
-- have no effect; track such uses and, when `cats` is given, categorize them.
local function check_params_ignored_when_embedded(alt, lang, id, cats)
	local given = { alt = alt, id = id }
	for _, param in ipairs({ "alt", "id" }) do
		if given[param] then
			track(param .. "-ignored")
			if cats then
				insert(cats, lang:getFullName() .. " links with ignored " .. param .. " parameters")
			end
		end
	end
end
-- Find embedded links and ensure they link to the correct section.
-- Uses control characters \1/\2 as stand-ins for "[["/"]]" during processing
-- and \3/\4 for the brackets of links that have already been rebuilt, so the
-- final pass can restore both without confusing them.
local function process_embedded_links(text, alt, lang, sc, id, cats, no_alt_ast, plain)
-- Process the non-linked text.
text = lang:makeDisplayText(text, sc, true)
-- If the text begins with * and another character, then act as if each link begins with *. However, don't do this if the * is contained within a link at the start. E.g. `|*[[foo]]` would set all_reconstructed to true, while `|[[*foo]]` would not.
local all_reconstructed = false
if not plain then
-- anchor_encode removes links etc.
if anchor_encode(text):sub(1, 1) == "*" then
all_reconstructed = true
end
-- Otherwise, handle any escapes.
text = text:gsub("^(\\-)\\%*", "%1*")
end
check_params_ignored_when_embedded(alt, lang, id, cats)
-- Rebuild one embedded link; `space1`/`space2` preserve surrounding spacing.
local function process_link(space1, linktext, space2)
local capture = "[[" .. linktext .. "]]"
local link = parse_link(linktext)
-- Return unprocessed false positives untouched (e.g. categories).
if not link then
return capture
end
if all_reconstructed then
if link.target:find("^!!") then
-- Check for anti-asterisk !! at the beginning of a target, indicating that a reconstructed term
-- wants a part of the term to link to a non-reconstructed term, e.g. Old English
-- {{ang-noun|m|head=*[[!!Crist|Cristes]] [[!!mæsseǣfen]]}}.
link.target = link.target:sub(3)
-- Also remove !! from the display, which may have been copied from the target (as in mæsseǣfen in
-- the example above).
link.display = link.display:gsub("^!!", "")
elseif not link.target:match("^%*") then
link.target = "*" .. link.target
end
end
linktext = make_link(link, lang, sc, id, false, nil, no_alt_ast, plain)
:gsub("^%[%[", "\3")
:gsub("%]%]$", "\4")
return space1 .. linktext .. space2
end
-- Use chars 1 and 2 as temporary substitutions, so that we can use charsets. These are converted to chars 3 and 4 by process_link, which means we can convert any remaining chars 1 and 2 back to square brackets (i.e. those not part of a link).
text = text
:gsub("%[%[", "\1")
:gsub("%]%]", "\2")
-- If the script uses ^ to capitalize transliterations, make sure that any carets preceding links are on the inside, so that they get processed with the following text.
if (
text:find("^", nil, true) and
not sc:hasCapitalization() and
sc:isTransliterated()
) then
text = escape(text, "^")
:gsub("%^\1", "\1%^")
text = unescape(text, "^")
end
text = text:gsub("\1(%s*)([^\1\2]-)(%s*)\2", process_link)
-- Remove the extra * at the beginning of a language link if it's immediately followed by a link whose display begins with * too.
if all_reconstructed then
text = text:gsub("^%*\3([^|\1-\4]+)|%*", "\3%1|*")
end
return (text
:gsub("[\1\3]", "[[")
:gsub("[\2\4]", "]]")
)
end
-- Shared implementation behind language_link() and plain_link().
-- A nil `lang` switches on plain mode (language "und", no entry-name
-- processing of * etc.). `srwc` is the caller's
-- `suppress_redundant_wikilink_cat` value; it appears to be an optional
-- callback that, when it returns true, suppresses the redundant-wikilink
-- category -- NOTE(review): confirm against callers.
-- Returns the finished wikitext, the bare display text, or nil when there is
-- nothing to link or display.
local function simple_link(term, fragment, alt, lang, sc, id, cats, no_alt_ast, srwc)
local plain
if lang == nil then
lang, plain = get_lang("und"), true
end
-- Get the link target and display text. If the term is the empty string, treat the input as a link to the current page.
if term == "" then
term = get_current_title().prefixedText
elseif term then
-- Unwrap a term that is itself a single complete wikilink.
local new_term, new_alt = export.get_wikilink_parts(term, true)
if new_term then
check_params_ignored_when_embedded(alt, lang, id, cats)
-- [[|foo]] links are treated as plaintext "[[|foo]]".
-- FIXME: Pipes should be handled via a proper escape sequence, as they can occur in unsupported titles.
if new_term == "" then
term, alt = nil, term
else
local title = new_title(new_term)
if title then
local ns = title.namespace
-- File: and Category: links should be returned as-is.
if ns == 6 or ns == 14 then
return term
end
end
term, alt = new_term, new_alt
if cats then
if not (srwc and srwc(term, alt)) then
insert(cats, lang:getFullName() .. " links with redundant wikilinks")
end
end
end
end
end
-- Normalize the alt text; a trimmed-to-empty alt counts as absent.
if alt then
alt = selective_trim(alt)
if alt == "" then
alt = nil
end
end
-- If there's nothing to process, return nil.
if not (term or alt) then
return nil
end
-- If there is no script, get one.
if not sc then
sc = lang:findBestScript(alt or term)
end
-- Embedded wikilinks need to be processed individually.
if term then
local open = find(term, "[[", nil, true)
if open and find(term, "]]", open + 2, true) then
return process_embedded_links(term, alt, lang, sc, id, cats, no_alt_ast, plain)
end
term = selective_trim(term)
end
-- If not, make a link using the parameters.
return make_link({
target = term,
display = alt,
fragment = fragment
}, lang, sc, id, true, cats, no_alt_ast, plain)
end
--[==[Creates a basic link to the given term. It links to the language section (such as <code>==English==</code>), but it does not add language and script wrappers, so any code that uses this function should call the <code class="n">[[Module:script utilities#tag_text|tag_text]]</code> from [[Module:script utilities]] to add such wrappers itself at some point.
The first argument, <code class="n">data</code>, may contain the following items, a subset of the items used in the <code class="n">data</code> argument of <code class="n">full_link</code>. If any other items are included, they are ignored.
{ {
term = entry_to_link_to,
alt = link_text_or_displayed_text,
lang = language_object,
id = sense_id,
} }
; <code class="n">term</code>
: Text to turn into a link. This is generally the name of a page. The text can contain wikilinks already embedded in it. These are processed individually just like a single link would be. The <code class="n">alt</code> argument is ignored in this case.
; <code class="n">alt</code> (''optional'')
: The alternative display for the link, if different from the linked page. If this is {{code|lua|nil}}, the <code class="n">text</code> argument is used instead (much like regular wikilinks). If <code class="n">text</code> contains wikilinks in it, this argument is ignored and has no effect. (Links in which the alt is ignored are tracked with the tracking template {{whatlinkshere|tracking=links/alt-ignored}}.)
; <code class="n">lang</code>
: The [[Module:languages#Language objects|language object]] for the term being linked. If this argument is defined, the function will determine the language's canonical name (see [[Template:language data documentation]]), and point the link or links in the <code class="n">term</code> to the language's section of an entry, or to a language-specific senseid if the <code class="n">id</code> argument is defined.
; <code class="n">id</code> (''optional'')
: Sense id string. If this argument is defined, the link will point to a language-specific sense id ({{ll|en|identifier|id=HTML}}) created by the template {{temp|senseid}}. A sense id consists of the language's canonical name, a hyphen (<code>-</code>), and the string that was supplied as the <code class="n">id</code> argument. This is useful when a term has more than one sense in a language. If the <code class="n">term</code> argument contains wikilinks, this argument is ignored. (Links in which the sense id is ignored are tracked with the tracking template {{whatlinkshere|tracking=links/id-ignored}}.)
The second argument is as follows:
; <code class="n">allow_self_link</code>
: If {{code|lua|true}}, the function will also generate links to the current page. The default ({{code|lua|false}}) will not generate a link but generate a bolded "self link" instead.
The following special options are processed for each link (both simple text and with embedded wikilinks):
* The target page name will be processed to generate the correct entry name. This is done by the [[Module:languages#makeEntryName|makeEntryName]] function in [[Module:languages]], using the <code class="n">entry_name</code> replacements in the language's data file (see [[Template:language data documentation]] for more information). This function is generally used to automatically strip dictionary-only diacritics that are not part of the normal written form of a language.
* If the text starts with <code class="n">*</code>, then the term is considered a reconstructed term, and a link to the Reconstruction: namespace will be created. If the text contains embedded wikilinks, then <code class="n">*</code> is automatically applied to each one individually, while preserving the displayed form of each link as it was given. This allows linking to phrases containing multiple reconstructed terms, while only showing the * once at the beginning.
* If the text starts with <code class="n">:</code>, then the link is treated as "raw" and the above steps are skipped. This can be used in rare cases where the page name begins with <code class="n">*</code> or if diacritics should not be stripped. For example:
** {{temp|l|en|*nix}} links to the nonexistent page [[Reconstruction:English/nix]] (<code class="n">*</code> is interpreted as a reconstruction), but {{temp|l|en|:*nix}} links to [[*nix]].
** {{temp|l|sl|Franche-Comté}} links to the nonexistent page [[Franche-Comte]] (<code>é</code> is converted to <code>e</code> by <code class="n">makeEntryName</code>), but {{temp|l|sl|:Franche-Comté}} links to [[Franche-Comté]].]==]
function export.language_link(data)
	if type(data) ~= "table" then
		error(
			"The first argument to the function language_link must be a table. See Module:links/documentation for more information.")
	end
	-- Temporary tracking of backslash escapes in the input.
	if (data.term and data.term:find("\\", nil, true)) or (data.alt and data.alt:find("\\", nil, true)) then
		track("escaped", "language_link")
	end
	-- Categorize links to "und".
	local lang, cats = data.lang, data.cats
	if cats and lang:getCode() == "und" then
		insert(cats, "Undetermined language links")
	end
	return simple_link(
		data.term, data.fragment, data.alt, lang, data.sc, data.id,
		cats, data.no_alt_ast, data.suppress_redundant_wikilink_cat
	)
end
-- Like language_link(), but language-neutral: passing a nil language makes
-- simple_link treat the link as plain (language "und").
function export.plain_link(data)
	if type(data) ~= "table" then
		error(
			"The first argument to the function plain_link must be a table. See Module:links/documentation for more information.")
	end
	-- Temporary tracking of backslash escapes in the input.
	if (data.term and data.term:find("\\", nil, true)) or (data.alt and data.alt:find("\\", nil, true)) then
		track("escaped", "plain_link")
	end
	return simple_link(
		data.term, data.fragment, data.alt, nil, data.sc, data.id,
		data.cats, data.no_alt_ast, data.suppress_redundant_wikilink_cat
	)
end
--[==[Replace any links with links to the correct section, but don't link the whole text if no embedded links are found. Returns the display text form.]==]
function export.embedded_language_links(data)
	if type(data) ~= "table" then
		error(
			"The first argument to the function embedded_language_links must be a table. See Module:links/documentation for more information.")
	end
	local term, alt = data.term, data.alt
	-- Temporary tracking of backslash escapes in the input.
	if (term and term:find("\\", nil, true)) or (alt and alt:find("\\", nil, true)) then
		track("escaped", "embedded_language_links")
	end
	local lang = data.lang
	-- Fall back to script detection when none was supplied.
	local sc = data.sc or lang:findBestScript(term)
	-- Embedded wikilinks must be processed one at a time.
	local open_pos = find(term, "[[", nil, true)
	if open_pos and find(term, "]]", open_pos + 2, true) then
		return process_embedded_links(term, alt, lang, sc, data.id, data.cats, data.no_alt_ast)
	end
	-- No links: just produce the display text.
	term = selective_trim(term)
	-- FIXME: Double-escape any percent-signs, because we don't want to treat non-linked text as having percent-encoded characters. This is a hack: percent-decoding should come out of [[Module:languages]] and only dealt with in this module, as it's specific to links.
	term = term:gsub("%%", "%%25")
	return lang:makeDisplayText(term, sc, true)
end
--[==[Wraps `text` in the HTML markup appropriate to `item_type` (one of {"gloss"}, {"tr"}, {"ts"},
{"pos"}, {"non-gloss"}, {"annotations"} or {"infl"}). `face` and `lang` only affect the markup used for
transliterations ({"tr"}). Returns the empty string when `text` is not a string.]==]
function export.mark(text, item_type, face, lang)
	local open, close = "", ""
	if item_type == "gloss" then
		open = '<span class="mention-gloss-double-quote">“</span><span class="mention-gloss">'
		close = '</span><span class="mention-gloss-double-quote">”</span>'
		if type(text) == "string" and text:match("^''[^'].*''$") then
			-- Temporary tracking for mention glosses that are entirely italicized or bolded, which is probably
			-- wrong. (Note that this will also find bolded mention glosses since they use triple apostrophes.)
			track("italicized-mention-gloss", lang and lang:getFullCode() or nil)
		end
	elseif item_type == "tr" then
		local code = lang:getFullCode()
		if face == "term" then
			open = '<span lang="' .. code .. '" class="tr mention-tr Latn">'
		else
			open = '<span lang="' .. code .. '" class="tr Latn">'
		end
		close = '</span>'
	elseif item_type == "ts" then
		-- \226\129\160 = word joiner (zero-width non-breaking space) U+2060
		open = '<span class="ts mention-ts Latn">/\226\129\160'
		close = '\226\129\160/</span>'
	elseif item_type == "pos" then
		open, close = '<span class="ann-pos">', '</span>'
	elseif item_type == "non-gloss" then
		open, close = '<span class="ann-non-gloss">', '</span>'
	elseif item_type == "annotations" then
		open = '<span class="mention-gloss-paren annotation-paren">(</span>'
		close = '<span class="mention-gloss-paren annotation-paren">)</span>'
	elseif item_type == "infl" then
		open, close = '<span class="ann-infl">', '</span>'
	end
	-- Non-string input (e.g. nil) produces no output at all.
	if type(text) ~= "string" then
		return ""
	end
	return open .. text .. close
end
-- Lazily-populated cache of the `pos_aliases` table from [[Module:headword/data]]; filled on first use.
local pos_tags
--[==[Formats the annotations that are displayed with a link created by {{code|lua|full_link}}. Annotations are the extra bits of information that are displayed following the linked term, and include things such as gender, transliteration, gloss and so on.
* The first argument is a table possessing some or all of the following keys:
*:; <code class="n">genders</code>
*:: Table containing a list of gender specifications in the style of [[Module:gender and number]].
*:; <code class="n">tr</code>
*:: Transliteration.
*:; <code class="n">gloss</code>
*:: Gloss that translates the term in the link, or gives some other descriptive information.
*:; <code class="n">pos</code>
*:: Part of speech of the linked term. If the given argument matches one of the aliases in `pos_aliases` in [[Module:headword/data]], or consists of a part of speech or alias followed by `f` (for a non-lemma form), expand it appropriately. Otherwise, just show the given text as it is.
*:; <code class="n">ng</code>
*:: Arbitrary non-gloss descriptive text for the link. This should be used in preference to putting descriptive text in `gloss` or `pos`.
*:; <code class="n">lit</code>
*:: Literal meaning of the term, if the usual meaning is figurative or idiomatic.
*:; <code class="n">infl</code>
*:: Table containing a list of grammar tags in the style of [[Module:form of]] `tagged_inflections`.
*:Any of the above values can be omitted from the <code class="n">info</code> argument. If a completely empty table is given (with no annotations at all), then an empty string is returned.
* The second argument is a string. Valid values are listed in [[Module:script utilities/data]] "data.translit" table.]==]
function export.format_link_annotations(data, face)
	-- Formats the annotations (genders, transliteration/transcription, gloss, part of speech,
	-- inflection tags, non-gloss text and literal meaning) displayed after a link; see the doc
	-- comment above for the recognized fields of `data`. `face` selects the transliteration styling.
	-- Returns the formatted annotation string (possibly empty).
	local output = {}
	-- Interwiki link
	if data.interwiki then
		insert(output, data.interwiki)
	end
	-- Genders: normalize a single gender spec into a one-element list.
	if type(data.genders) ~= "table" then
		data.genders = { data.genders }
	end
	if data.genders and #data.genders > 0 then
		local genders, gender_cats = format_genders(data.genders, data.lang)
		insert(output, " " .. genders)
		if gender_cats then
			local cats = data.cats
			if cats then
				extend(cats, gender_cats)
			end
		end
	end
	local annotations = {}
	-- Transliteration and transcription. Guard each table access: callers may supply only one of
	-- `tr`/`ts` (or neither); the previous code indexed both tables unconditionally and raised a
	-- nil-index error when one of them was missing.
	local tr = data.tr and data.tr[1]
	local ts = data.ts and data.ts[1]
	if tr or ts then
		local kind
		if face == "term" then
			kind = face
		else
			kind = "default"
		end
		if tr and ts then
			insert(annotations, tag_translit(tr, data.lang, kind) .. " " .. export.mark(ts, "ts"))
		elseif ts then
			insert(annotations, export.mark(ts, "ts"))
		else
			insert(annotations, tag_translit(tr, data.lang, kind))
		end
	end
	-- Gloss/translation
	if data.gloss then
		insert(annotations, export.mark(data.gloss, "gloss"))
	end
	-- Part of speech
	if data.pos then
		-- debug category for pos= containing transcriptions
		if data.pos:match("/[^><]-/") then
			data.pos = data.pos .. "[[Category:links likely containing transcriptions in pos]]"
		end
		-- Canonicalize part of speech aliases as well as non-lemma aliases like 'nf' or 'nounf' for "noun form".
		pos_tags = pos_tags or (m_headword_data or get_headword_data()).pos_aliases
		local pos = pos_tags[data.pos]
		if not pos and data.pos:find("f$") then
			local pos_form = data.pos:sub(1, -2)
			-- We only expand something ending in 'f' if the result is a recognized non-lemma POS.
			pos_form = (pos_tags[pos_form] or pos_form) .. " form"
			if (m_headword_data or get_headword_data()).nonlemmas[pos_form .. "s"] then
				pos = pos_form
			end
		end
		insert(annotations, export.mark(pos or data.pos, "pos"))
	end
	-- Inflection data
	if data.infl then
		local m_form_of = require(form_of_module)
		-- Split tag sets manually, since tagged_inflections creates a numbered list, and we do not want that.
		local infl_outputs = {}
		for _, tag_set in ipairs(m_form_of.split_tag_set(data.infl)) do
			insert(infl_outputs,
				m_form_of.tagged_inflections({ tags = tag_set, lang = data.lang, nocat = true, nolink = true, nowrap = true }))
		end
		insert(annotations, export.mark(concat(infl_outputs, "; "), "infl"))
	end
	-- Non-gloss text
	if data.ng then
		insert(annotations, export.mark(data.ng, "non-gloss"))
	end
	-- Literal/sum-of-parts meaning
	if data.lit then
		insert(annotations, "literally " .. export.mark(data.lit, "gloss"))
	end
	-- Provide a hook to insert additional annotations such as nested inflections.
	if data.postprocess_annotations then
		data.postprocess_annotations {
			data = data,
			annotations = annotations
		}
	end
	if #annotations > 0 then
		insert(output, " " .. export.mark(concat(annotations, ", "), "annotations"))
	end
	return concat(output)
end
-- Encode certain characters to avoid various delimiter-related issues at various stages. We need to encode < and >
-- because they end up forming part of CSS class names inside of <span ...> and will interfere with finding the end
-- of the HTML tag. I first tried converting them to URL encoding, i.e. %3C and %3E; they then appear in the URL as
-- %253C and %253E, which get mapped back to %3C and %3E when passed to [[Module:accel]]. But mapping them to &lt;
-- and &gt; somehow works magically without any further work; they appear in the URL as &lt; and &gt;, and get passed
-- to [[Module:accel]] as < and >. I have no idea who along the chain of calls is doing the encoding and decoding. If
-- someone knows, please modify this comment appropriately!
-- Lazily-built map from characters that are problematic in accelerated-creation CSS class names to safe
-- replacements (see the comment above).
local accel_char_map
local function get_accel_char_map()
	accel_char_map = {
		["%"] = ".",
		[" "] = "_",
		["_"] = u(0xFFF0),
		-- Angle brackets must be replaced by their HTML entities: mapping them to themselves (as the
		-- corrupted identity entries previously did) is a no-op, and a raw < or > inside the class
		-- attribute breaks parsing of the enclosing <span> tag.
		["<"] = "&lt;",
		[">"] = "&gt;",
	}
	return accel_char_map
end
-- Replace every problematic character in `param` using the accelerated-creation character map.
local function encode_accel_param_chars(param)
	local map = accel_char_map or get_accel_char_map()
	return (param:gsub("[% <>_]", map))
end
-- Encode one accelerated-creation parameter, prepending `prefix`. Returns "" for a nil `param`.
-- Table-valued parameters are joined with "*~!" (split up again by [[Module:accel]]); gaps in the
-- sequence become empty strings.
local function encode_accel_param(prefix, param)
	if not param then
		return ""
	end
	if type(param) == "table" then
		-- There may be gaps in the sequence, especially for translit params, so determine the
		-- highest numeric key rather than relying on the length operator.
		local last = 0
		for key in pairs(param) do
			if type(key) == "number" and key > last then
				last = key
			end
		end
		local parts = {}
		for i = 1, last do
			parts[i] = param[i] or ""
		end
		-- [[Module:accel]] splits these up again.
		param = concat(parts, "*~!")
	end
	-- This is decoded again by [[WT:ACCEL]].
	return prefix .. encode_accel_param_chars(param)
end
-- Append `item` to `list`, skipping empty strings.
local function insert_if_not_blank(list, item)
	if item ~= "" then
		insert(list, item)
	end
end
-- Build the CSS class string for a link: accelerated-creation ("form-of") classes derived from
-- `accel`, plus an optional `nowrap` class. Returns "" when neither applies.
local function get_class(lang, tr, accel, nowrap)
	if not (accel or nowrap) then
		return ""
	end
	local classes = {}
	if accel then
		insert(classes, "form-of lang-" .. lang:getFullCode())
		if accel.form then
			insert(classes, encode_accel_param_chars(accel.form) .. "-form-of")
		end
		insert_if_not_blank(classes, encode_accel_param("gender-", accel.gender))
		insert_if_not_blank(classes, encode_accel_param("pos-", accel.pos))
		-- Fall back to the link's transliteration unless it was suppressed with "-".
		insert_if_not_blank(classes, encode_accel_param("transliteration-", accel.translit or (tr ~= "-" and tr or nil)))
		insert_if_not_blank(classes, encode_accel_param("target-", accel.target))
		insert_if_not_blank(classes, encode_accel_param("origin-", accel.lemma))
		insert_if_not_blank(classes, encode_accel_param("origin_transliteration-", accel.lemma_translit))
		if accel.no_store then
			insert(classes, "form-of-nostore")
		end
	end
	if nowrap then
		insert(classes, nowrap)
	end
	return concat(classes, " ")
end
-- Add any left or right regular or accent qualifiers, labels or references to a formatted term. `data` is the object
-- specifying the term, which should optionally contain:
-- * a language object in `lang`; required if any accent qualifiers or labels are given;
-- * left regular qualifiers in `q` (an array of strings or a single string); an empty array or blank string will be
-- ignored;
-- * right regular qualifiers in `qq` (an array of strings or a single string); an empty array or blank string will be
-- ignored;
-- * left accent qualifiers in `a` (an array of strings); an empty array will be ignored;
-- * right accent qualifiers in `aa` (an array of strings); an empty array will be ignored;
-- * left labels in `l` (an array of strings); an empty array will be ignored;
-- * right labels in `ll` (an array of strings); an empty array will be ignored;
-- * references in `refs`, an array either of strings (formatted reference text) or objects containing fields `text`
-- (formatted reference text) and optionally `name` and/or `group`.
-- `formatted` is the formatted version of the term itself.
local function add_qualifiers_and_refs_to_term(data, formatted)
	-- Normalize single-string qualifiers into one-element lists.
	local left_q = data.q
	if type(left_q) == "string" then
		left_q = { left_q }
	end
	local right_q = data.qq
	if type(right_q) == "string" then
		right_q = { right_q }
	end
	-- Only invoke the formatter when at least one qualifier, label or reference is present.
	local has_extras = (left_q and left_q[1]) or (right_q and right_q[1])
		or (data.a and data.a[1]) or (data.aa and data.aa[1])
		or (data.l and data.l[1]) or (data.ll and data.ll[1])
		or (data.refs and data.refs[1])
	if has_extras then
		formatted = format_qualifiers {
			lang = data.lang,
			text = formatted,
			q = left_q,
			qq = right_q,
			a = data.a,
			aa = data.aa,
			l = data.l,
			ll = data.ll,
			refs = data.refs,
		}
	end
	return formatted
end
--[==[
Creates a full link, with annotations (see `[[#format_link_annotations|format_link_annotations]]`), in the style of {{tl|l}} or {{tl|m}}.
The first argument, `data`, must be a table. It contains the various elements that can be supplied as parameters to {{tl|l}} or {{tl|m}}:
{ {
term = entry_to_link_to,
alt = link_text_or_displayed_text,
lang = language_object,
sc = script_object,
track_sc = boolean,
no_nonstandard_sc_cat = boolean,
fragment = link_fragment,
id = sense_id,
genders = { "gender1", "gender2", ... },
tr = transliteration,
respect_link_tr = boolean,
ts = transcription,
gloss = gloss,
pos = part_of_speech_tag,
ng = non-gloss text,
lit = literal_translation,
infl = { "form_of_grammar_tag1", "form_of_grammar_tag2", ... },
no_alt_ast = boolean,
accel = {accelerated_creation_tags},
interwiki = interwiki,
pretext = "text_at_beginning" or nil,
posttext = "text_at_end" or nil,
q = { "left_qualifier1", "left_qualifier2", ...} or "left_qualifier",
qq = { "right_qualifier1", "right_qualifier2", ...} or "right_qualifier",
l = { "left_label1", "left_label2", ...},
ll = { "right_label1", "right_label2", ...},
a = { "left_accent_qualifier1", "left_accent_qualifier2", ...},
aa = { "right_accent_qualifier1", "right_accent_qualifier2", ...},
refs = { "formatted_ref1", "formatted_ref2", ...} or { {text = "text", name = "name", group = "group"}, ... },
show_qualifiers = boolean,
} }
Any one of the items in the `data` table may be {nil}, but an error will be shown if neither `term` nor `alt` nor `tr`
is present. Thus, calling {full_link{ term = term, lang = lang, sc = sc }}, where `term` is the page to link to (which
may have diacritics that will be stripped and/or embedded bracketed links) and `lang` is a
[[Module:languages#Language objects|language object]] from [[Module:languages]], will give a plain link similar to the
one produced by the template {{tl|l}}, and calling {full_link( { term = term, lang = lang, sc = sc }, "term" )} will
give a link similar to the one produced by the template {{tl|m}}.
The function will:
* Try to determine the script, based on the characters found in the `term` or `alt` argument, if the script was not
given. If a script is given and `track_sc` is {true}, it will check whether the input script is the same as the one
which would have been automatically generated and add the category [[:Category:LANG terms with redundant script codes]]
if yes, or [[:Category:LANG terms with non-redundant manual script codes]] if no. This should be used when the input
script object is directly determined by a template's `sc` parameter.
* Call `[[#language_link|language_link]]` on the `term` or `alt` forms, to remove diacritics in the page name, process
any embedded wikilinks and create links to Reconstruction or Appendix pages when necessary.
* Call `[[Module:script utilities#tag_text]]` to add the appropriate language and script tags to the term and
italicize terms written in the Latin script if necessary. Accelerated creation tags, as used by [[WT:ACCEL]], are
included.
* Generate a transliteration, based on the `alt` or `term` arguments, if the script is not Latin, no transliteration was
provided in `tr` and the combination of the term's language and script support automatic transliteration. The
transliteration itself will be linked if both `.respect_link_tr` is specified and the language of the term has the
`link_tr` property set for the script of the term; but not otherwise.
* Add the annotations (transliteration, gender, gloss, etc.) after the link.
* If `no_alt_ast` is specified, then the `alt` text does not need to contain an asterisk if the language is
reconstructed. This should only be used by modules which really need to allow links to reconstructions that don't
display asterisks (e.g. number boxes).
* If `pretext` or `posttext` is specified, this is text to (respectively) prepend or append to the output, directly
before processing qualifiers, labels and references. This can be used to add arbitrary extra text inside of the
qualifiers, labels and references.
* If `show_qualifiers` is specified or the `show_qualifiers` argument is given, then left and right qualifiers, accent
qualifiers, labels and references will be displayed, otherwise they will be ignored. (This is because a fair amount of
code stores qualifiers, labels and/or references in these fields and displays them itself, rather than expecting
{full_link()} to display them.)]==]
function export.full_link(data, face, allow_self_link, show_qualifiers)
	if type(data) ~= "table" then
		error("The first argument to the function full_link must be a table. "
			.. "See Module:links/documentation for more information.")
	elseif data.term and data.term:find("\\", nil, true) or data.alt and data.alt:find("\\", nil, true) then
		track("escaped", "full_link")
	end
	-- Prevent data from being destructively modified.
	local data = shallow_copy(data)
	-- FIXME: this shouldn't be added to `data`, as that means the input table needs to be cloned.
	data.cats = {}
	-- Categorize links to "und".
	local lang, cats = data.lang, data.cats
	if cats and lang:getCode() == "und" then
		insert(cats, "Undetermined language links")
	end
	-- `terms[i]` is set to true for every form index that has a term/alt/sc/tr/ts value; index 1
	-- always exists so the loop below runs at least once.
	local terms = { true }
	-- Generate multiple forms if applicable.
	for _, param in ipairs { "term", "alt" } do
		if type(data[param]) == "string" and data[param]:find("//", nil, true) then
			data[param] = export.split_on_slashes(data[param])
		elseif type(data[param]) == "string" and not (type(data.term) == "string" and data.term:find("//", nil, true)) then
			if not data.no_generate_forms then
				data[param] = lang:generateForms(data[param])
			else
				data[param] = { data[param] }
			end
		else
			data[param] = {}
		end
	end
	-- Normalize the remaining per-form parameters into (possibly empty) one-element tables.
	for _, param in ipairs { "sc", "tr", "ts" } do
		data[param] = { data[param] }
	end
	for _, param in ipairs { "term", "alt", "sc", "tr", "ts" } do
		for i in pairs(data[param]) do
			terms[i] = true
		end
	end
	-- Create the link
	local output = {}
	local id, no_alt_ast, srwc, accel, nevercalltr = data.id, data.no_alt_ast, data.suppress_redundant_wikilink_cat,
		data.accel, data.never_call_transliteration_module
	-- Whether the transliteration itself should be turned into a link (language/script-dependent).
	local link_tr = data.respect_link_tr and lang:link_tr(data.sc[1])
	for i in ipairs(terms) do
		local link
		-- Is there any text to show?
		if (data.term[i] or data.alt[i]) then
			-- Try to detect the script if it was not provided
			local display_term = data.alt[i] or data.term[i]
			local best = lang:findBestScript(display_term)
			-- no_nonstandard_sc_cat is intended for use in [[Module:interproject]]
			if (
				not data.no_nonstandard_sc_cat and
				best:getCode() == "None" and
				find_best_script_without_lang(display_term):getCode() ~= "None"
			) then
				insert(cats, lang:getFullName() .. " terms in nonstandard scripts")
			end
			if not data.sc[i] then
				data.sc[i] = best
			-- Track uses of sc parameter.
			elseif data.track_sc then
				if data.sc[i]:getCode() == best:getCode() then
					insert(cats, lang:getFullName() .. " terms with redundant script codes")
				else
					insert(cats, lang:getFullName() .. " terms with non-redundant manual script codes")
				end
			end
			-- If using a discouraged character sequence, add to maintenance category
			if data.sc[i]:hasNormalizationFixes() == true then
				if (data.term[i] and data.sc[i]:fixDiscouragedSequences(toNFC(data.term[i])) ~= toNFC(data.term[i])) or (data.alt[i] and data.sc[i]:fixDiscouragedSequences(toNFC(data.alt[i])) ~= toNFC(data.alt[i])) then
					insert(cats, "Pages using discouraged character sequences")
				end
			end
			link = simple_link(
				data.term[i],
				data.fragment,
				data.alt[i],
				lang,
				data.sc[i],
				id,
				cats,
				no_alt_ast,
				srwc
			)
		end
		-- simple_link can return nil, so check if a link has been generated.
		if link then
			-- Add "nowrap" class to prefixes in order to prevent wrapping after the hyphen
			local nowrap
			local display_term = data.alt[i] or data.term[i]
			if display_term and (display_term:find("^%-") or display_term:find("^־")) then -- Hebrew maqqef -- FIXME, use hyphens from [[Module:affix]]
				nowrap = "nowrap"
			end
			link = tag_text(link, lang, data.sc[i], face, get_class(lang, data.tr[i], accel, nowrap))
		else
			--[[ No term to show.
				Is there at least a transliteration we can work from? ]]
			link = request_script(lang, data.sc[i])
			-- No link to show, and no transliteration either. Show a term request (unless it's a substrate, as they rarely take terms).
			if (link == "" or (not data.tr[i]) or data.tr[i] == "-") and lang:getFamilyCode() ~= "qfa-sub" then
				-- If there are multiple terms, break the loop instead.
				if i > 1 then
					remove(output)
					break
				elseif NAMESPACE ~= "Template" then
					insert(cats, lang:getFullName() .. " term requests")
				end
				link = "<small>[Term?]</small>"
			end
		end
		insert(output, link)
		-- Separate multiple forms with a slash wrapped in a "Zsym mention" span.
		if i < #terms then insert(output, "<span class=\"Zsym mention\" style=\"font-size:100%;\"> / </span>") end
	end
	-- When suppress_tr is true, do not show or generate any transliteration
	if data.suppress_tr then
		data.tr[1] = nil
	else
		-- TODO: Currently only handles the first transliteration, pending consensus on how to handle multiple translits for multiple forms, as this is not always desirable (e.g. traditional/simplified Chinese).
		if data.tr[1] == "" or data.tr[1] == "-" then
			data.tr[1] = nil
		else
			-- Some languages extract the transliteration from a pronunciation module instead of
			-- transliterating the term directly.
			local phonetic_extraction = load_data("Module:links/data").phonetic_extraction
			phonetic_extraction = phonetic_extraction[lang:getCode()] or phonetic_extraction[lang:getFullCode()]
			if phonetic_extraction then
				data.tr[1] = data.tr[1] or
					require(phonetic_extraction).getTranslit(export.remove_links(data.alt[1] or data.term[1]))
			elseif (data.term[1] or data.alt[1]) and data.sc[1]:isTransliterated() then
				-- Track whenever there is manual translit. The categories below like 'terms with redundant transliterations'
				-- aren't sufficient because they only work with reference to automatic translit and won't operate at all in
				-- languages without any automatic translit, like Persian and Hebrew.
				if data.tr[1] then
					local full_code = lang:getFullCode()
					track("manual-tr", full_code)
				end
				if not nevercalltr then
					-- Try to generate a transliteration.
					local text = data.alt[1] or data.term[1]
					if not link_tr then
						text = export.remove_links(text, true)
					end
					local automated_tr = lang:transliterate(text, data.sc[1])
					if automated_tr then
						local manual_tr = data.tr[1]
						if manual_tr then
							if export.remove_links(manual_tr) == export.remove_links(automated_tr) then
								insert(cats, lang:getFullName() .. " terms with redundant transliterations")
							else
								-- Prevents Arabic root categories from flooding the tracking categories.
								if NAMESPACE ~= "Category" then
									insert(cats,
										lang:getFullName() .. " terms with non-redundant manual transliterations")
								end
							end
						end
						-- The automatic transliteration wins when there is no manual one, or when the
						-- language is configured to override manual translit for this script.
						if not manual_tr or lang:overrideManualTranslit(data.sc[1]) then
							data.tr[1] = automated_tr
						end
					end
				end
			end
		end
	end
	-- Link to the transliteration entry for languages that require this
	if data.tr[1] and link_tr and not data.tr[1]:match("%[%[(.-)%]%]") then
		data.tr[1] = simple_link(
			data.tr[1],
			nil,
			nil,
			lang,
			get_script("Latn"),
			nil,
			cats,
			no_alt_ast,
			srwc
		)
	elseif data.tr[1] and not link_tr then
		-- Remove the pseudo-HTML tags added by remove_links.
		data.tr[1] = data.tr[1]:gsub("</?link>", "")
	end
	-- Discard a transliteration consisting solely of whitespace and punctuation.
	if data.tr[1] and not umatch(data.tr[1], "[^%s%p]") then data.tr[1] = nil end
	insert(output, export.format_link_annotations(data, face))
	if data.pretext then
		insert(output, 1, data.pretext)
	end
	if data.posttext then
		insert(output, data.posttext)
	end
	local categories = cats[1] and format_categories(cats, lang, "-", nil, nil, data.sc) or ""
	output = concat(output)
	-- Qualifiers, labels and references are only rendered when explicitly requested, since many
	-- callers format these fields themselves.
	if show_qualifiers or data.show_qualifiers then
		output = add_qualifiers_and_refs_to_term(data, output)
	end
	return output .. categories
end
--[==[Replaces all wikilinks with their displayed text, and removes any categories. This function can be invoked either from a template or from another module.
-- Strips links: deletes category links, the targets of piped links, and any double square brackets involved in links (other than file links, which are untouched). If `tag` is set, then any links removed will be given pseudo-HTML tags, which allow the substitution functions in [[Module:languages]] to properly subdivide the text in order to reduce the chance of substitution failures in modules which scrape pages like [[Module:zh-translit]].
-- FIXME: This is quite hacky. We probably want this to be integrated into [[Module:languages]], but we can't do that until we know that nothing is pushing pipe linked transliterations through it for languages which don't have link_tr set.
* <code><nowiki>[[page|displayed text]]</nowiki></code> → <code><nowiki>displayed text</nowiki></code>
* <code><nowiki>[[page and displayed text]]</nowiki></code> → <code><nowiki>page and displayed text</nowiki></code>
* <code><nowiki>[[Category:English lemmas|WORD]]</nowiki></code> → ''(nothing)'']==]
function export.remove_links(text, tag)
	-- Allow direct invocation from a template: pull the text out of the frame arguments.
	if type(text) == "table" then
		text = text.args[1]
	end
	if not text or text == "" then
		return ""
	end
	-- Temporarily swap the link delimiters for control characters, so each innermost
	-- bracket pair can be matched without ambiguity.
	text = text:gsub("%[%[", "\1"):gsub("%]%]", "\2")
	-- Parse internal links for the display text.
	text = text:gsub("(\1)([^\1\2]-)(\2)", function(open, inner, close)
		local lowered = inner:lower()
		-- Don't remove files.
		for _, prefix in ipairs({ "file", "image" }) do
			if lowered:match("^" .. prefix .. ":") then
				return open .. inner .. close
			end
		end
		-- Remove categories completely.
		for _, prefix in ipairs({ "category", "cat" }) do
			if lowered:match("^" .. prefix .. ":") then
				return ""
			end
		end
		-- In piped links, remove all text before the pipe, unless it's the final character (i.e. the pipe trick), in which case just remove the pipe.
		inner = inner:match("^[^|]*|(.+)") or inner:match("([^|]+)|$") or inner
		if tag then
			return "<link>" .. inner .. "</link>"
		end
		return inner
	end)
	-- Restore any remaining (non-link) delimiters.
	return (text:gsub("\1", "[["):gsub("\2", "]]"))
end
--[==[Creates a link to a specific section of a page, displayed as "page § section". `link` must be a
string containing a "#" that separates the target page from the section name; underscores are treated
as spaces.]==]
function export.section_link(link)
	if type(link) ~= "string" then
		error("The first argument to section_link was a " .. type(link) .. ", but it should be a string.")
	end
	if link:find("\\", nil, true) then
		track("escaped", "section_link")
	end
	local target, section = get_fragment((link:gsub("_", " ")))
	if not section then
		error("No \"#\" delineating a section name")
	end
	return simple_link(
		target,
		section,
		target .. " § " .. section
	)
end
return export
go1a5j6fymqq8baizjbz87vusf7r536
মডিউল:languages
828
6288
507786
323676
2026-04-14T06:40:47Z
Redmin
6857
[[en:Module:languages|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507786
Scribunto
text/plain
--[==[ intro:
This module implements fetching of language-specific information and processing text in a given language.
===Types of languages===
There are two types of languages: full languages and etymology-only languages. The essential difference is that only
full languages appear in L2 headings in vocabulary entries, and hence categories like [[:Category:French nouns]] exist
only for full languages. Etymology-only languages have either a full language or another etymology-only language as
their parent (in the parent-child inheritance sense), and for etymology-only languages with another etymology-only
language as their parent, a full language can always be derived by following the parent links upwards. For example,
"Canadian French", code `fr-CA`, is an etymology-only language whose parent is the full language "French", code `fr`.
An example of an etymology-only language with another etymology-only parent is "Northumbrian Old English", code
`ang-nor`, which has "Anglian Old English", code `ang-ang` as its parent; this is an etymology-only language whose
parent is "Old English", code `ang`, which is a full language. (This is because Northumbrian Old English is considered
a variety of Anglian Old English.) Sometimes the parent is the "Undetermined" language, code `und`; this is the case,
for example, for "substrate" languages such as "Pre-Greek", code `qsb-grc`, and "the BMAC substrate", code `qsb-bma`.
It is important to distinguish language ''parents'' from language ''ancestors''. The parent-child relationship is one
of containment, i.e. if X is a child of Y, X is considered a variety of Y. On the other hand, the ancestor-descendant
relationship is one of descent in time. For example, "Classical Latin", code `la-cla`, and "Late Latin", code `la-lat`,
are both etymology-only languages with "Latin", code `la`, as their parents, because both of the former are varieties
of Latin. However, Late Latin does *NOT* have Classical Latin as its parent because Late Latin is *not* a variety of
Classical Latin; rather, it is a descendant. There is in fact a separate `ancestors` field that is used to express the
ancestor-descendant relationship, and Late Latin's ancestor is given as Classical Latin. It is also important to note
that sometimes an etymology-only language is actually the conceptual ancestor of its parent language. This happens,
for example, with "Old Italian" (code `roa-oit`), which is an etymology-only variant of full language "Italian" (code
`it`), and with "Old Latin" (code `itc-ola`), which is an etymology-only variant of Latin. In both cases, the full
language has the etymology-only variant listed as an ancestor. This allows a Latin term to inherit from Old Latin
using the {{tl|inh}} template (where in this template, "inheritance" refers to ancestral inheritance, i.e. inheritance
in time, rather than in the parent-child sense); likewise for Italian and Old Italian.
Full languages come in three subtypes:
* {regular}: This indicates a full language that is attested according to [[WT:CFI]] and therefore permitted in the
main namespace. There may also be reconstructed terms for the language, which are placed in the
{Reconstruction} namespace and must be prefixed with * to indicate a reconstruction. Most full languages
are natural (not constructed) languages, but a few constructed languages (e.g. Esperanto and Volapük,
among others) are also allowed in the mainspace and considered regular languages.
* {reconstructed}: This language is not attested according to [[WT:CFI]], and therefore is allowed only in the
{Reconstruction} namespace. All terms in this language are reconstructed, and must be prefixed with
*. Languages such as Proto-Indo-European and Proto-Germanic are in this category.
* {appendix-constructed}: This language is attested but does not meet the additional requirements set out for
constructed languages ([[WT:CFI#Constructed languages]]). Its entries must therefore be in
the Appendix namespace, but they are not reconstructed and therefore should not have *
prefixed in links. Most constructed languages are of this subtype.
Both full languages and etymology-only languages have a {Language} object associated with them, which is fetched using
the {getByCode} function in [[Module:languages]] to convert a language code to a {Language} object. Depending on the
options supplied to this function, etymology-only languages may or may not be accepted, and family codes may be
accepted (returning a {Family} object as described in [[Module:families]]). There are also separate {getByCanonicalName}
functions in [[Module:languages]] and [[Module:etymology languages]] to convert a language's canonical name to a
{Language} object (depending on whether the canonical name refers to a full or etymology-only language).
===Textual representations===
Textual strings belonging to a given language come in several different ''text variants'':
# The ''input text'' is what the user supplies in wikitext, in the parameters to {{tl|m}}, {{tl|l}}, {{tl|ux}},
{{tl|t}}, {{tl|lang}} and the like.
# The ''corrected input text'' is the input text with some corrections and/or normalizations applied, such as
bad-character replacements for certain languages, like replacing `l` or `1` with [[palochka]] in some languages written
in Cyrillic. (FIXME: This currently goes under the name ''display text'' but that will be repurposed below. Also,
[[User:Surjection]] suggests renaming this to ''normalized input text'', but "normalized" is used in a different sense
in [[Module:usex]].)
# The ''display text'' is the text in the form as it will be displayed to the user. This is what appears in headwords,
in usexes, in displayed internal links, etc. This can include accent marks that are removed to form the stripped
display text (see below), as well as embedded bracketed links that are variously processed further. The display text
is generated from the corrected input text by applying language-specific transformations; for most languages, there
will be no such transformations. The general reason for having a difference between input and display text is to allow
for extra information in the input text that is not displayed to the user but is sent to the transliteration module.
Note that having different display and input text is only supported currently through special-casing but will be
generalized. Examples of transformations are: (1) Removing the {{cd|^}} that is used in certain East Asian (and
possibly other unicameral) languages to indicate capitalization of the transliteration (which is currently
special-cased); (2) for Korean, removing or otherwise processing hyphens (which is currently special-cased); (3) for
Arabic, removing a ''sukūn'' diacritic placed over a ''tāʔ marbūṭa'' (like this: ةْ) to indicate that the
''tāʔ marbūṭa'' is pronounced and transliterated as /t/ instead of being silent [NOTE, NOT IMPLEMENTED YET]; (4) for
Thai and Khmer, converting space-separated words to bracketed words and resolving respelling substitutions such as
`[กรีน/กฺรีน]`, which indicate how to transliterate given words [NOTE, NOT IMPLEMENTED YET except in language-specific
templates like {{tl|th-usex}}].
## The ''right-resolved display text'' is the result of removing brackets around one-part embedded links and resolving
two-part embedded links into their right-hand components (i.e. converting two-part links into the displayed form).
The process of right-resolution is what happens when you call {{cd|remove_links()}} in [[Module:links]] on some text.
When applied to the display text, it produces exactly what the user sees, without any link markup.
# The ''stripped display text'' is the result of applying diacritic-stripping to the display text.
## The ''left-resolved stripped display text'' [NEED BETTER NAME] is the result of applying left-resolution to the
stripped display text, i.e. similar to right-resolution but resolving two-part embedded links into their left-hand
components (i.e. the linked-to page). If the display text refers to a single page, the result of applying
diacritic stripping and left-resolution produces the ''logical pagename''.
# The ''physical pagename text'' is the result of converting the stripped display text into physical page links. If the
stripped display text contains embedded links, the left side of those links is converted into physical page links;
otherwise, the entire text is considered a pagename and converted in the same fashion. The conversion does three
things: (1) converts characters not allowed in pagenames into their "unsupported title" representation, e.g.
{{cd|Unsupported titles/`gt`}} in place of the logical name {{cd|>}}; (2) handles certain special-cased
unsupported-title logical pagenames, such as {{cd|Unsupported titles/Space}} in place of {{cd|[space]}} and
{{cd|Unsupported titles/Ancient Greek dish}} in place of a very long Greek name for a gourmet dish as found in
Aristophanes; (3) converts "mammoth" pagenames such as [[a]] into their appropriate split component, e.g.
[[a/languages A to L]].
# The ''source translit text'' is the text as supplied to the language-specific {{cd|transliterate()}} method. The form
of the source translit text may need to be language-specific, e.g. Thai and Khmer will need the corrected input text,
whereas other languages may need to work off the display text. [FIXME: It's still unclear to me how embedded bracketed
links are handled in the existing code.] In general, embedded links need to be right-resolved (see above), but when
this happens is unclear to me [FIXME]. Some languages have a chop-up-and-paste-together scheme that sends parts of the
text through the transliterate mechanism, and for others (those listed with "cont" in {{cd|substitution}} in
[[Module:languages/data]]) they receive the full input text, but preprocessed in certain ways. (The wisdom of this is
still unclear to me.)
# The ''transliterated text'' (or ''transliteration'') is the result of transliterating the source translit text. Unlike
for all the other text variants except the transcribed text, it is always in the Latin script.
# The ''transcribed text'' (or ''transcription'') is the result of transcribing the source translit text, where
"transcription" here means a close approximation to the phonetic form of the language in languages (e.g. Akkadian,
Sumerian, Ancient Egyptian, maybe Tibetan) that have a wide difference between the written letters and spoken form.
Unlike for all the other text variants other than the transliterated text, it is always in the Latin script.
Currently, the transcribed text is always supplied manually by the user; there is no such thing as a
{{cd|transcribe()}} method on language objects.
# The ''sort key'' is the text used in sort keys for determining the placing of pages in categories they belong to. The
sort key is generated from the pagename or a specified ''sort base'' by lowercasing, doing language-specific
transformations and then uppercasing the result. If the sort base is supplied and is generated from input text, it
needs to be converted to display text, have embedded links removed through right-resolution and have
diacritic-stripping applied.
# There are other text variants that occur in usexes (specifically, there are normalized variants of several of the
above text variants), but we can skip them for now.
The following methods exist on {Language} objects to convert between different text variants:
# {correctInputText} (currently called {makeDisplayText}): This converts input text to corrected input text.
# {stripDiacritics}: This converts to stripped display text. [FIXME: This needs some rethinking. In particular,
{stripDiacritics} is sometimes called on input text, corrected input text or display text (in various paths inside of
[[Module:links]], and, in the case of input text, usually from other modules). We need to make sure we don't try to
convert input text to display text twice, but at the same time we need to support calling it directly on input text
since so many modules do this. This means we need to add a parameter indicating whether the passed-in text is input,
corrected input, or display text; if the former two, we call {correctInputText} ourselves.]
# {logicalToPhysical}: This converts logical pagenames to physical pagenames.
# {transliterate}: This appears to convert input text with embedded brackets removed into a transliteration.
[FIXME: This needs some rethinking. In particular, it calls {processDisplayText} on its input, which won't work
for Thai and Khmer, so we may need language-specific flags indicating whether to pass the input text directly to the
language transliterate method. In addition, I'm not sure how embedded links are handled in the existing translit code;
a lot of callers remove the links themselves before calling {transliterate()}, which I assume is wrong.]
# {makeSortKey}: This converts display text (?) to a sort key. [FIXME: Clarify this.]
]==]
local export = {}
local debug_track_module = "Module:debug/track"
local etymology_languages_data_module = "Module:etymology languages/data"
local families_module = "Module:families"
local headword_page_module = "Module:headword/page"
local json_module = "Module:JSON"
local language_like_module = "Module:language-like"
local languages_data_module = "Module:languages/data"
local languages_data_patterns_module = "Module:languages/data/patterns"
local links_data_module = "Module:links/data"
local load_module = "Module:load"
local scripts_module = "Module:scripts"
local scripts_data_module = "Module:scripts/data"
local string_encode_entities_module = "Module:string/encode entities"
local string_pattern_escape_module = "Module:string/patternEscape"
local string_replacement_escape_module = "Module:string/replacementEscape"
local string_utilities_module = "Module:string utilities"
local table_module = "Module:table"
local utilities_module = "Module:utilities"
local wikimedia_languages_module = "Module:wikimedia languages"
local mw = mw
local string = string
local table = table
local char = string.char
local concat = table.concat
local find = string.find
local floor = math.floor
local get_by_code -- Defined below.
local get_data_module_name -- Defined below.
local get_extra_data_module_name -- Defined below.
local getmetatable = getmetatable
local gmatch = string.gmatch
local gsub = string.gsub
local insert = table.insert
local ipairs = ipairs
local is_known_language_tag = mw.language.isKnownLanguageTag
local make_object -- Defined below.
local match = string.match
local next = next
local pairs = pairs
local remove = table.remove
local require = require
local select = select
local setmetatable = setmetatable
local sub = string.sub
local type = type
local unstrip = mw.text.unstrip
-- Loaded as needed by findBestScript.
local Hans_chars
local Hant_chars
-- Lazy loaders: each helper below is a self-replacing stub. On first call it
-- requires the target module (or a specific function from it), overwrites the
-- local variable with the real function, then tail-calls it; every later call
-- goes straight to the loaded function. This defers the cost of loading each
-- module until (and unless) it is actually needed.
local function check_object(...)
	check_object = require(utilities_module).check_object
	return check_object(...)
end
local function debug_track(...)
	debug_track = require(debug_track_module)
	return debug_track(...)
end
local function decode_entities(...)
	decode_entities = require(string_utilities_module).decode_entities
	return decode_entities(...)
end
local function decode_uri(...)
	decode_uri = require(string_utilities_module).decode_uri
	return decode_uri(...)
end
local function deep_copy(...)
	deep_copy = require(table_module).deepCopy
	return deep_copy(...)
end
local function encode_entities(...)
	encode_entities = require(string_encode_entities_module)
	return encode_entities(...)
end
local function get_L2_sort_key(...)
	get_L2_sort_key = require(headword_page_module).get_L2_sort_key
	return get_L2_sort_key(...)
end
local function get_script(...)
	get_script = require(scripts_module).getByCode
	return get_script(...)
end
local function find_best_script_without_lang(...)
	find_best_script_without_lang = require(scripts_module).findBestScriptWithoutLang
	return find_best_script_without_lang(...)
end
local function get_family(...)
	get_family = require(families_module).getByCode
	return get_family(...)
end
local function get_plaintext(...)
	get_plaintext = require(utilities_module).get_plaintext
	return get_plaintext(...)
end
local function get_wikimedia_lang(...)
	get_wikimedia_lang = require(wikimedia_languages_module).getByCode
	return get_wikimedia_lang(...)
end
local function keys_to_list(...)
	keys_to_list = require(table_module).keysToList
	return keys_to_list(...)
end
local function list_to_set(...)
	list_to_set = require(table_module).listToSet
	return list_to_set(...)
end
local function load_data(...)
	load_data = require(load_module).load_data
	return load_data(...)
end
local function make_family_object(...)
	make_family_object = require(families_module).makeObject
	return make_family_object(...)
end
local function pattern_escape(...)
	pattern_escape = require(string_pattern_escape_module)
	return pattern_escape(...)
end
local function replacement_escape(...)
	replacement_escape = require(string_replacement_escape_module)
	return replacement_escape(...)
end
local function safe_require(...)
	safe_require = require(load_module).safe_require
	return safe_require(...)
end
local function shallow_copy(...)
	shallow_copy = require(table_module).shallowCopy
	return shallow_copy(...)
end
local function split(...)
	split = require(string_utilities_module).split
	return split(...)
end
local function to_json(...)
	to_json = require(json_module).toJSON
	return to_json(...)
end
-- Unicode-aware string helpers from [[Module:string utilities]].
local function u(...)
	u = require(string_utilities_module).char
	return u(...)
end
local function ugsub(...)
	ugsub = require(string_utilities_module).gsub
	return ugsub(...)
end
local function ulen(...)
	ulen = require(string_utilities_module).len
	return ulen(...)
end
local function ulower(...)
	ulower = require(string_utilities_module).lower
	return ulower(...)
end
local function umatch(...)
	umatch = require(string_utilities_module).match
	return umatch(...)
end
local function uupper(...)
	uupper = require(string_utilities_module).upper
	return uupper(...)
end
-- Record a debug-tracking hit under "languages/<page>" via [[Module:debug/track]].
-- Always returns true so the call can be embedded in boolean expressions
-- without short-circuiting them.
local function track(page)
	debug_track("languages/" .. page)
	return true
end
-- Resolve a language-code alias to its canonical code using the aliases table
-- in [[Module:languages/data]]; codes with no alias entry are returned unchanged.
local function normalize_code(code)
	return load_data(languages_data_module).aliases[code] or code
end
-- Apply `check` (called as check(self, value)) to each vararg in order and
-- return the first non-nil result. Returns false when no varargs are supplied,
-- and `default` when every check returns nil.
local function check_inputs(self, check, default, ...)
	local count = select("#", ...)
	if count == 0 then
		return false
	end
	for i = 1, count do
		local result = check(self, (select(i, ...)))
		if result ~= nil then
			return result
		end
	end
	return default
end
-- Build a piped wikilink [[target|display]]. For substrate languages (family
-- code "qfa-sub"), a leading "the " or "a " article in the display text is
-- kept outside the link brackets.
local function make_link(self, target, display)
	local article, rest
	if self:getFamilyCode() == "qfa-sub" then
		article, rest = display:match("^(the )(.*)")
		if not article then
			article, rest = display:match("^(a )(.*)")
		end
	end
	if article then
		return article .. "[[" .. target .. "|" .. rest .. "]]"
	end
	return "[[" .. target .. "|" .. display .. "]]"
end
-- Convert risky characters to HTML entities, which minimizes interference once returned (e.g. for "sms:a", "<!-- -->" etc.).
local function escape_risky_characters(text)
	-- Spacing characters in isolation generally need to be escaped in order to be properly processed by the MediaWiki software.
	if umatch(text, "^%s*$") then
		-- Whitespace-only input: pass the text itself as the set of characters
		-- to encode, so every character gets entity-encoded.
		return encode_entities(text, text)
	end
	-- Otherwise encode only the wikitext/HTML-significant punctuation.
	return encode_entities(text, "!#%&*+/:;<=>?@[\\]_{|}")
end
-- Temporarily convert various formatting characters to PUA to prevent them from being disrupted by the substitution process.
-- Each substituted string is appended to `subbedChars` and replaced in the text by a 4-byte sequence with lead byte
-- \244 that encodes its index in `subbedChars` (see the byte2/byte3/byte4 computation below); undoTempSubstitutions()
-- reverses this. `keepCarets` also substitutes carets (and escaped carets); `noTrim` skips the substitution of
-- leading/trailing whitespace, which is only wanted on the first round. Returns the modified text and `subbedChars`.
local function doTempSubstitutions(text, subbedChars, keepCarets, noTrim)
	-- Clone so that we don't insert any extra patterns into the table in package.loaded. For some reason, using require seems to keep memory use down; probably because the table is always cloned.
	local patterns = shallow_copy(require(languages_data_patterns_module))
	if keepCarets then
		insert(patterns, "((\\+)%^)")
		insert(patterns, "((%^))")
	end
	-- Ensure any whitespace at the beginning and end is temp substituted, to prevent it from being accidentally trimmed. We only want to trim any final spaces added during the substitution process (e.g. by a module), which means we only do this during the first round of temp substitutions.
	if not noTrim then
		insert(patterns, "^([\128-\191\244]*(%s+))")
		insert(patterns, "((%s+)[\128-\191\244]*)$")
	end
	-- Pre-substitution, of "[[" and "]]", which makes pattern matching more accurate.
	text = gsub(text, "%f[%[]%[%[", "\1"):gsub("%f[%]]%]%]", "\2")
	local i = #subbedChars
	for _, pattern in ipairs(patterns) do
		-- Patterns ending in \0 are for things like "[[" or "]]", so the inserted PUA are treated as breaks between terms by modules that scrape info from pages.
		local term_divider
		pattern = gsub(pattern, "%z$", function(divider)
			term_divider = divider == "\0"
			return ""
		end)
		text = gsub(text, pattern, function(...)
			local m = {...}
			local m1New = m[1]
			for k = 2, #m do
				local n = i + k - 1
				subbedChars[n] = m[k]
				-- Encode index n into three continuation bytes; term dividers
				-- use second-byte base 128, other substitutions base 136.
				local byte2 = floor(n / 4096) % 64 + (term_divider and 128 or 136)
				local byte3 = floor(n / 64) % 64 + 128
				local byte4 = n % 64 + 128
				m1New = gsub(m1New, pattern_escape(m[k]), "\244" .. char(byte2) .. char(byte3) .. char(byte4), 1)
			end
			i = i + #m - 1
			return m1New
		end)
	end
	-- Restore the "[[" / "]]" sentinels.
	text = gsub(text, "\1", "%[%["):gsub("\2", "%]%]")
	return text, subbedChars
end
-- Reinsert any formatting that was temporarily substituted.
-- For each saved string, matches the 4-byte PUA encoding of its index with
-- either second-byte base (char(byte2) for term dividers, char(byte2+8) for
-- regular substitutions — mirroring the +128/+136 split in
-- doTempSubstitutions()) and substitutes the original string back in.
local function undoTempSubstitutions(text, subbedChars)
	for i = 1, #subbedChars do
		local byte2 = floor(i / 4096) % 64 + 128
		local byte3 = floor(i / 64) % 64 + 128
		local byte4 = i % 64 + 128
		text = gsub(text, "\244[" .. char(byte2) .. char(byte2+8) .. "]" .. char(byte3) .. char(byte4),
			replacement_escape(subbedChars[i]))
	end
	-- Also restore the "[[" / "]]" sentinels used during pre-substitution.
	text = gsub(text, "\1", "%[%["):gsub("\2", "%]%]")
	return text
end
-- Check if the raw text is an unsupported title, and if so return that. Otherwise, remove HTML entities. We do the pre-conversion to avoid loading the unsupported title list unnecessarily.
-- NOTE(review): `self` is currently unused; it is kept for call-site compatibility.
local function checkNoEntities(self, text)
	local textNoEnc = decode_entities(text)
	-- Only consult the unsupported-titles list when decoding actually changed
	-- the text; entity-free text cannot be a special-cased unsupported title.
	if textNoEnc ~= text and load_data(links_data_module).unsupported_titles[text] then
		return text
	else
		return textNoEnc
	end
end
-- If no script object is provided (or if it's invalid or None), get one.
local function checkScript(text, self, sc)
	-- A valid, non-"None" script passes straight through.
	if check_object("script", true, sc) and sc:getCode() ~= "None" then
		return sc
	end
	return self:findBestScript(text)
end
-- Fix discouraged character sequences, then convert the text into the
-- script's fixed NFD normalization form.
local function normalize(text, sc)
	return sc:toFixedNFD(sc:fixDiscouragedSequences(text))
end
-- Subfunction of iterateSectionSubstitutions(). Process an individual chunk of text according to the specifications in
-- `substitution_data`. The input parameters are all as in the documentation of iterateSectionSubstitutions() except for
-- `recursed`, which is set to true if we called ourselves recursively to process a script-specific setting or
-- script-wide fallback. Returns two values: the processed text and the actual substitution data used to do the
-- substitutions (same as the `actual_substitution_data` return value to iterateSectionSubstitutions()).
local function doSubstitutions(self, text, sc, substitution_data, data_field, function_name, recursed)
	-- BE CAREFUL in this function because the value at any level can be `false`, which causes no processing to be done
	-- and blocks any further fallback processing.
	local actual_substitution_data = substitution_data
	-- If there are language-specific substitutes given in the data module, use those.
	if type(substitution_data) == "table" then
		-- If a script is specified, run this function with the script-specific data before continuing.
		local sc_code = sc:getCode()
		local has_substitution_data = false
		if substitution_data[sc_code] ~= nil then
			has_substitution_data = true
			if substitution_data[sc_code] then
				text, actual_substitution_data = doSubstitutions(self, text, sc, substitution_data[sc_code], data_field,
					function_name, true)
			end
		-- Hant, Hans and Hani are usually treated the same, so add a special case to avoid having to specify each one
		-- separately.
		elseif sc_code:match("^Han") and substitution_data.Hani ~= nil then
			has_substitution_data = true
			if substitution_data.Hani then
				text, actual_substitution_data = doSubstitutions(self, text, sc, substitution_data.Hani, data_field,
					function_name, true)
			end
		-- Substitution data with key 1 in the outer table may be given as a fallback.
		elseif substitution_data[1] ~= nil then
			has_substitution_data = true
			if substitution_data[1] then
				text, actual_substitution_data = doSubstitutions(self, text, sc, substitution_data[1], data_field,
					function_name, true)
			end
		end
		-- Iterate over all strings in the "from" subtable, and gsub with the corresponding string in "to". We work with
		-- the NFD decomposed forms, as this simplifies many substitutions.
		if substitution_data.from then
			has_substitution_data = true
			for i, from in ipairs(substitution_data.from) do
				-- Normalize each loop, to ensure multi-stage substitutions work correctly.
				text = sc:toFixedNFD(text)
				-- A missing "to" entry means deletion.
				text = ugsub(text, sc:toFixedNFD(from), substitution_data.to[i] or "")
			end
		end
		if substitution_data.remove_diacritics then
			has_substitution_data = true
			text = sc:toFixedNFD(text)
			-- Convert exceptions to PUA.
			local remove_exceptions, substitutes = substitution_data.remove_exceptions
			if remove_exceptions then
				substitutes = {}
				local i = 0
				for _, exception in ipairs(remove_exceptions) do
					exception = sc:toFixedNFD(exception)
					text = ugsub(text, exception, function(m)
						i = i + 1
						-- Codepoints from U+80001 up serve as per-match placeholders.
						local subst = u(0x80000 + i)
						substitutes[subst] = m
						return subst
					end)
				end
			end
			-- Strip diacritics.
			text = ugsub(text, "[" .. substitution_data.remove_diacritics .. "]", "")
			-- Convert exceptions back.
			if remove_exceptions then
				text = text:gsub("\242[\128-\191]*", substitutes)
			end
		end
		if not has_substitution_data and sc._data[data_field] then
			-- If language-specific sort key (etc.) is nil, fall back to script-wide sort key (etc.).
			text, actual_substitution_data = doSubstitutions(self, text, sc, sc._data[data_field], data_field,
				function_name, true)
		end
	elseif type(substitution_data) == "string" then
		-- If there is a dedicated function module, use that.
		local module = safe_require("Module:" .. substitution_data)
		if module then
			-- TODO: translit functions should take objects, not codes.
			-- TODO: translit functions should be called with form NFD.
			if function_name == "tr" then
				if not module[function_name] then
					error(("Internal error: Module [[%s]] has no function named 'tr'"):format(substitution_data))
				end
				text = module[function_name](text, self._code, sc:getCode())
			elseif function_name == "stripDiacritics" then
				-- FIXME, get rid of this arm after renaming makeEntryName -> stripDiacritics.
				if module[function_name] then
					text = module[function_name](sc:toFixedNFD(text), self, sc)
				elseif module.makeEntryName then
					text = module.makeEntryName(sc:toFixedNFD(text), self, sc)
				else
					error(("Internal error: Module [[%s]] has no function named 'stripDiacritics' or 'makeEntryName'"
						):format(substitution_data))
				end
			else
				if not module[function_name] then
					error(("Internal error: Module [[%s]] has no function named '%s'"):format(
						substitution_data, function_name))
				end
				text = module[function_name](sc:toFixedNFD(text), self, sc)
			end
		else
			error("Substitution data '" .. substitution_data .. "' does not match an existing module.")
		end
	elseif substitution_data == nil and sc._data[data_field] then
		-- If language-specific sort key (etc.) is nil, fall back to script-wide sort key (etc.).
		text, actual_substitution_data = doSubstitutions(self, text, sc, sc._data[data_field], data_field,
			function_name, true)
	end
	-- Don't normalize to NFC if this is the inner loop or if a module returned nil.
	if recursed or not text then
		return text, actual_substitution_data
	end
	-- Fix any discouraged sequences created during the substitution process, and normalize into the final form.
	return sc:toFixedNFC(sc:fixDiscouragedSequences(text)), actual_substitution_data
end
-- Split the text into sections, based on the presence of temporarily substituted formatting characters, then iterate
-- over each section to apply substitutions (e.g. transliteration or diacritic stripping). This avoids putting PUA
-- characters through language-specific modules, which may be unequipped for them. This function is passed the following
-- values:
-- * `self` (the Language object);
-- * `text` (the text to process);
-- * `sc` (the script of the text, which must be specified; callers should call checkScript() as needed to autodetect the
--   script of the text if not given explicitly by the user);
-- * `subbedChars` (an array of the same length as the text, indicating which characters have been substituted and by
--   what, or {nil} if no substitutions are to happen);
-- * `keepCarets` (DOCUMENT ME);
-- * `substitution_data` (the data indicating which substitutions to apply, taken directly from `data_field` in the
--   language's data structure in a submodule of [[Module:languages/data]]);
-- * `data_field` (the data field from which `substitution_data` was fetched, such as "sort_key" or "strip_diacritics");
-- * `function_name` (the name of the function to call to do the substitution, in case `substitution_data` specifies a
--   module to do the substitution);
-- * `notrim` (don't trim whitespace at the edges of `text`; set when computing the sort key, because whitespace at the
--   beginning of a sort key is significant and causes the resulting page to be sorted at the beginning of the category
--   it's in).
-- Returns three values:
-- (1) the processed text;
-- (2) the value of `subbedChars` that was passed in, possibly modified with additional character substitutions; will be
--     {nil} if {nil} was passed in;
-- (3) the actual substitution data that was used to apply substitutions to `text`; this may be different from the value
--     of `substitution_data` passed in if that value recursively specified script-specific substitutions or if no
--     substitution data could be found in the language-specific data (e.g. {nil} was passed in or a structure was passed
--     in that had no setting for the script given in `sc`), but a script-wide fallback value was set; currently it is
--     only used by makeSortKey().
local function iterateSectionSubstitutions(self, text, sc, subbedChars, keepCarets, substitution_data, data_field,
		function_name, notrim)
	local sections
	-- See [[Module:languages/data]]. No \244 lead byte means no PUA
	-- substitutions are present, so the text can be processed whole.
	if not find(text, "\244") or load_data(languages_data_module).substitution[self._code] == "cont" then
		sections = {text}
	else
		sections = split(text, "\244[\128-\143][\128-\191]*", true)
	end
	local actual_substitution_data
	for _, section in ipairs(sections) do
		-- Don't bother processing empty strings or whitespace (which may also not be handled well by dedicated
		-- modules).
		if gsub(section, "%s+", "") ~= "" then
			local sub, this_actual_substitution_data = doSubstitutions(self, section, sc, substitution_data, data_field,
				function_name)
			actual_substitution_data = this_actual_substitution_data
			-- Second round of temporary substitutions, in case any formatting was added by the main substitution
			-- process. However, don't do this if the section contains formatting already (as it would have had to have
			-- been escaped to reach this stage, and therefore should be given as raw text).
			if sub and subbedChars then
				local noSub
				for _, pattern in ipairs(require(languages_data_patterns_module)) do
					if match(section, pattern .. "%z?") then
						noSub = true
					end
				end
				if not noSub then
					sub, subbedChars = doTempSubstitutions(sub, subbedChars, keepCarets, true)
				end
			end
			-- A nil `sub` means a substitution module returned nil; propagate
			-- the nil result and stop processing further sections.
			if not sub then
				text = sub
				break
			end
			text = sub and gsub(text, pattern_escape(section), replacement_escape(sub), 1) or text
		end
	end
	if not notrim then
		-- Trim, unless there are only spacing characters, while ignoring any final formatting characters.
		-- Do not trim sort keys because spaces at the beginning are significant.
		text = text and text:gsub("^([\128-\191\244]*)%s+(%S)", "%1%2"):gsub("(%S)%s+([\128-\191\244]*)$", "%1%2") or
			nil
	end
	return text, subbedChars, actual_substitution_data
end
-- Process carets (and any escapes). Default to simple removal, if no pattern/replacement is given.
local function processCarets(text, pattern, repl)
	local replaced
	-- Temporarily convert '\\' pairs preceding a caret to \3, so the '\^'
	-- check below only sees a caret escaped by a genuine lone backslash.
	repeat
		text, replaced = text:gsub("\\\\(\\*^)", "\3%1")
	until replaced == 0
	-- Shield escaped carets as \4, apply the caret substitution, then restore
	-- the shielded backslashes and carets.
	text = text:gsub("\\^", "\4")
	text = text:gsub(pattern or "%^", repl or "")
	text = text:gsub("\3", "\\")
	return (text:gsub("\4", "^"))
end
-- Remove carets if they are used to capitalize parts of transliterations (unless they have been escaped).
local function removeCarets(text, sc)
	-- Carets are only meaningful for transliterated scripts without native
	-- capitalization; otherwise (or if no caret is present) leave untouched.
	if sc:hasCapitalization() or not sc:isTransliterated() or not text:find("^", 1, true) then
		return text
	end
	return processCarets(text)
end
-- Method table shared by all Language objects (attached to objects elsewhere
-- in this module).
local Language = {}
--[==[Returns the language code of the language. Example: {{code|lua|"fr"}} for French.]==]
function Language:getCode()
	return self._code
end
--[==[Returns the canonical name of the language. This is the name used to represent that language on Wiktionary, and is guaranteed to be unique to that language alone. Example: {{code|lua|"French"}} for French.]==]
function Language:getCanonicalName()
	-- Lazily cache the name, which is stored as the first element of the raw
	-- data table.
	if self._name == nil then
		self._name = self._data[1]
	end
	return self._name
end
--[==[
Return the display form of the language. The display form of a language, family or script is the form it takes when
appearing as the <code><var>source</var></code> in categories such as <code>English terms derived from
<var>source</var></code> or <code>English given names from <var>source</var></code>, and is also the displayed text
in {makeCategoryLink()} links. For full and etymology-only languages, this is the same as the canonical name, but
for families, it reads <code>"<var>name</var> languages"</code> (e.g. {"Indo-Iranian languages"}), and for scripts,
it reads <code>"<var>name</var> script"</code> (e.g. {"Arabic script"}).
]==]
function Language:getDisplayForm()
	local form = self._displayForm
	if form ~= nil then
		return form
	end
	form = self:getCanonicalName()
	-- Add article and " substrate" to substrates that lack them.
	if self:getFamilyCode() == "qfa-sub" then
		if form:sub(1, 4) ~= "the " and form:sub(1, 2) ~= "a " then
			form = "a " .. form
		end
		if not form:match(" [Ss]ubstrate") then
			form = form .. " substrate"
		end
	end
	self._displayForm = form
	return form
end
--[==[Returns the value which should be used in the HTML lang= attribute for tagged text in the language.]==]
-- `sc` is the script object; `region` is an optional region subtag. Codes
-- without a hyphen map directly to "code-Script[-region]". Hyphenated
-- (etymology-only) codes recurse up the parent chain, inheriting any
-- uppercase run found in the code as the region, and fall back to "mis"
-- (uncoded languages) when no parent exists.
function Language:getHTMLAttribute(sc, region)
	local code = self._code
	if not find(code, "-", 1, true) then
		return code .. "-" .. sc:getCode() .. (region and "-" .. region or "")
	end
	local parent = self:getParent()
	-- Extract a maximal run of uppercase letters/hyphens from the code to use
	-- as the region subtag, if none was passed in.
	region = region or match(code, "%f[%u][%u-]+%f[%U]")
	if parent then
		return parent:getHTMLAttribute(sc, region)
	end
	-- TODO: ISO family codes can also be used.
	return "mis-" .. sc:getCode() .. (region and "-" .. region or "")
end
--[==[Returns a table of the aliases that the language is known by, excluding the canonical name. Aliases are synonyms for the language in question. The names are not guaranteed to be unique, in that sometimes more than one language is known by the same name. Example: {{code|lua|{"High German", "New High German", "Deutsch"} }} for [[:Category:German language|German]].]==]
function Language:getAliases()
	-- Aliases live in the extra-data module, which is merged in on demand.
	self:loadInExtraData()
	return require(language_like_module).getAliases(self)
end
--[==[
Return a table of the known subvarieties of a given language, excluding subvarieties that have been given
explicit etymology-only language codes. The names are not guaranteed to be unique, in that sometimes a given name
refers to a subvariety of more than one language. Example: {{code|lua|{"Southern Aymara", "Central Aymara"} }} for
[[:Category:Aymara language|Aymara]]. Note that the returned value can have nested tables in it, when a subvariety
goes by more than one name. Example: {{code|lua|{"North Azerbaijani", "South Azerbaijani", {"Afshar", "Afshari",
"Afshar Azerbaijani", "Afchar"}, {"Qashqa'i", "Qashqai", "Kashkay"}, "Sonqor"} }} for
[[:Category:Azerbaijani language|Azerbaijani]]. Here, for example, Afshar, Afshari, Afshar Azerbaijani and Afchar
all refer to the same subvariety, whose preferred name is Afshar (the one listed first). To avoid a return value
with nested tables in it, specify a non-{{code|lua|nil}} value for the <code>flatten</code> parameter; in that case,
the return value would be {{code|lua|{"North Azerbaijani", "South Azerbaijani", "Afshar", "Afshari",
"Afshar Azerbaijani", "Afchar", "Qashqa'i", "Qashqai", "Kashkay", "Sonqor"} }}.
]==]
function Language:getVarieties(flatten)
	-- Varieties live in the extra-data module, which is merged in on demand.
	self:loadInExtraData()
	return require(language_like_module).getVarieties(self, flatten)
end
--[==[Returns a table of the "other names" that the language is known by, which are listed in the <code>otherNames</code> field. It should be noted that the <code>otherNames</code> field itself is deprecated, and entries listed there should eventually be moved to either <code>aliases</code> or <code>varieties</code>.]==]
function Language:getOtherNames() -- To be eventually removed, once there are no more uses of the `otherNames` field.
	-- Other names live in the extra-data module, which is merged in on demand.
	self:loadInExtraData()
	return require(language_like_module).getOtherNames(self)
end
--[==[
Return a combined table of the canonical name, aliases, varieties and other names of a given language.]==]
function Language:getAllNames()
	-- All name variants live in the extra-data module, loaded on demand.
	self:loadInExtraData()
	return require(language_like_module).getAllNames(self)
end
--[==[Returns a table of types as a lookup table (with the types as keys).
The possible types are
* {language}: This is a language, either full or etymology-only.
* {full}: This is a "full" (not etymology-only) language, i.e. the union of {regular}, {reconstructed} and
{appendix-constructed}. Note that the types {full} and {etymology-only} also exist for families, so if you
want to check specifically for a full language and you have an object that might be a family, you should
use {{lua|hasType("language", "full")}} and not simply {{lua|hasType("full")}}.
* {etymology-only}: This is an etymology-only (not full) language, whose parent is another etymology-only
language or a full language. Note that the types {full} and {etymology-only} also exist for
families, so if you want to check specifically for an etymology-only language and you have an
object that might be a family, you should use {{lua|hasType("language", "etymology-only")}}
and not simply {{lua|hasType("etymology-only")}}.
* {regular}: This indicates a full language that is attested according to [[WT:CFI]] and therefore permitted
in the main namespace. There may also be reconstructed terms for the language, which are placed in
the {Reconstruction} namespace and must be prefixed with * to indicate a reconstruction. Most full
languages are natural (not constructed) languages, but a few constructed languages (e.g. Esperanto
and Volapük, among others) are also allowed in the mainspace and considered regular languages.
* {reconstructed}: This language is not attested according to [[WT:CFI]], and therefore is allowed only in the
{Reconstruction} namespace. All terms in this language are reconstructed, and must be prefixed
with *. Languages such as Proto-Indo-European and Proto-Germanic are in this category.
* {appendix-constructed}: This language is attested but does not meet the additional requirements set out for
constructed languages ([[WT:CFI#Constructed languages]]). Its entries must therefore
be in the Appendix namespace, but they are not reconstructed and therefore should
not have * prefixed in links.
]==]
--[==[Returns the set of types that apply to this language, as a table mapping each type name to {true}.
The set always contains {language}, plus {full} or {etymology-only} depending on whether the language is its
own full language, plus every comma-separated entry in the data module's {type} field. Computed lazily and
cached on the object.]==]
function Language:getTypes()
	local cached = self._types
	if cached ~= nil then
		return cached
	end
	local types = {language = true}
	-- A language whose code equals its full code is a full language; otherwise it is etymology-only.
	if self:getFullCode() == self._code then
		types.full = true
	else
		types["etymology-only"] = true
	end
	-- Additional types are stored as a comma-separated string in the data module.
	for t in gmatch(self._data.type, "[^,]+") do
		types[t] = true
	end
	self._types = types
	return types
end
--[==[Given a list of types as strings, returns true if the language has all of them.]==]
-- Lazy-loading stub: on the first call, the method on the Language class is replaced by the shared
-- implementation from the "language like" module, so later calls bypass this indirection entirely.
function Language:hasType(...)
Language.hasType = require(language_like_module).hasType
return self:hasType(...)
end
--[==[Returns a table of <code>WikimediaLanguage</code> objects (see [[Module:wikimedia languages]]) for the
languages and codes used for interwiki linking on Wikimedia projects. A single Wiktionary language may map to
several Wikimedia languages; e.g. Wiktionary's <code>sh</code> (Serbo-Croatian) maps to the Wikimedia codes
<code>sh</code>, <code>bs</code>, <code>hr</code> and <code>sr</code>. The codes come from
{:getWikimediaLanguageCodes}; if none of them is a valid Wikimedia code, an empty table is returned.]==]
function Language:getWikimediaLanguages()
	if self._wikimediaLanguageObjects == nil then
		local objects = {}
		local codes = self:getWikimediaLanguageCodes()
		for i, code in ipairs(codes) do
			objects[i] = get_wikimedia_lang(code)
		end
		self._wikimediaLanguageObjects = objects
	end
	return self._wikimediaLanguageObjects
end
--[==[Returns the list of Wikimedia language codes for this language: the comma-separated
<code>wikimedia_codes</code> data field if present; otherwise the language's own code if it is a known
language tag; otherwise the parent's codes (if there is a parent); otherwise an empty table. Cached.]==]
function Language:getWikimediaLanguageCodes()
	local codes = self._wikimediaLanguageCodes
	if codes ~= nil then
		return codes
	end
	local data_codes = self._data.wikimedia_codes
	if data_codes then
		codes = split(data_codes, ",", true, true)
	elseif is_known_language_tag(self._code) then
		codes = {self._code}
	else
		-- Inherit from the parent, but only when the data specifies no codes *and* the language's
		-- own code is not itself a valid Wikimedia language code.
		local parent = self:getParent()
		codes = parent and parent:getWikimediaLanguageCodes() or {}
	end
	self._wikimediaLanguageCodes = codes
	return codes
end
--[==[
Returns the name of the Wikipedia article for the language. `project` specifies the language and project to retrieve
the article from, defaulting to {"enwiki"} for the English Wikipedia. Normally if specified it should be the project
code for a specific-language Wikipedia e.g. "zhwiki" for the Chinese Wikipedia, but it can be any project, including
non-Wikipedia ones. If the project is the English Wikipedia and the property {wikipedia_article} is present in the data
module it will be used first. In all other cases, a sitelink will be generated from {:getWikidataItem} (if set). The
resulting value (or lack of value) is cached so that subsequent calls are fast. If no value could be determined, and
`noCategoryFallback` is {false}, {:getCategoryName} is used as fallback; otherwise, {nil} is returned. Note that if
`noCategoryFallback` is {nil} or omitted, it defaults to {false} if the project is the English Wikipedia, otherwise
to {true}. In other words, under normal circumstances, if the English Wikipedia article couldn't be retrieved, the
return value will fall back to a link to the language's category, but this won't normally happen for any other project.
]==]
-- Lazy-loading stub: the real implementation lives in the "language like" module and is installed on the
-- class the first time this method is called.
function Language:getWikipediaArticle(noCategoryFallback, project)
Language.getWikipediaArticle = require(language_like_module).getWikipediaArticle
return self:getWikipediaArticle(noCategoryFallback, project)
end
--[==[Creates a wikitext link to the language's Wikipedia article; the link text is the canonical name.]==]
function Language:makeWikipediaLink()
	local target = "w:" .. self:getWikipediaArticle()
	return make_link(self, target, self:getCanonicalName())
end
--[==[Returns the name of the Wikimedia Commons category page for the language.]==]
-- Lazy-loading stub: replaced on the class by the shared implementation from the "language like" module
-- on first call.
function Language:getCommonsCategory()
Language.getCommonsCategory = require(language_like_module).getCommonsCategory
return self:getCommonsCategory()
end
--[==[Returns the Wikidata item id for the language or <code>nil</code>. This corresponds to the second field in the data modules.]==]
-- Lazy-loading stub: replaced on the class by the shared implementation from the "language like" module
-- on first call.
function Language:getWikidataItem()
Language.getWikidataItem = require(language_like_module).getWikidataItem
return self:getWikidataItem()
end
--[==[Returns a table of <code>Script</code> objects for all scripts that the language is written in. See [[Module:scripts]]. Cached.]==]
function Language:getScripts()
	if self._scriptObjects == nil then
		local codes = self:getScriptCodes()
		if codes[1] == "All" then
			-- "All" means every known script: return the loaded script data table itself.
			self._scriptObjects = load_data(scripts_data_module)
		else
			local objects = {}
			for i, code in ipairs(codes) do
				objects[i] = get_script(code)
			end
			self._scriptObjects = objects
		end
	end
	return self._scriptObjects
end
--[==[Returns the table of script codes in the language's data file. The special code "Hants" is expanded into
"Hani", "Hant" and "Hans"; if the data lists no scripts, {{"None"}} is returned. Cached.]==]
function Language:getScriptCodes()
	local codes = self._scriptCodes
	if codes ~= nil then
		return codes
	end
	local raw = self._data[4]
	if raw then
		codes = {}
		local n = 0
		for code in gmatch(raw, "[^,]+") do
			if code == "Hants" then
				-- "Hants" collectively represents "Hani", "Hant" and "Hans".
				codes[n + 1], codes[n + 2], codes[n + 3] = "Hani", "Hant", "Hans"
				n = n + 3
			else
				n = n + 1
				codes[n] = code
			end
		end
	else
		codes = {"None"}
	end
	self._scriptCodes = codes
	return codes
end
--[==[Given some text, this function iterates through the scripts of a given language and tries to find the script that best matches the text. It returns a {{code|lua|Script}} object representing the script. If no match is found at all, it returns the {{code|lua|None}} script object.]==]
function Language:findBestScript(text, forceDetect)
-- Empty or placeholder input can never match a script.
if not text or text == "" or text == "-" then
return get_script("None")
end
-- Differs from table returned by getScriptCodes, as Hants is not normalized into its constituents.
local codes = self._bestScriptCodes
if codes == nil then
codes = self._data[4]
codes = codes and split(codes, ",", true, true) or {"None"}
self._bestScriptCodes = codes
end
local first_sc = codes[1]
if first_sc == "All" then
return find_best_script_without_lang(text)
end
local codes_len = #codes
-- Fast path: a single concrete script and no forced detection — just check whether any character of
-- the text falls inside that script's charset.
if not (forceDetect or first_sc == "Hants" or codes_len > 1) then
first_sc = get_script(first_sc)
local charset = first_sc.characters
return charset and umatch(text, "[" .. charset .. "]") and first_sc or get_script("None")
end
-- Remove all formatting characters.
text = get_plaintext(text)
-- Remove all spaces and any ASCII punctuation. Some non-ASCII punctuation is script-specific, so can't be removed.
text = ugsub(text, "[%s!\"#%%&'()*,%-./:;?@[\\%]_{}]+", "")
if #text == 0 then
return get_script("None")
end
-- Try to match every script against the text,
-- and return the one with the most matching characters.
local bestcount, bestscript, length = 0
for i = 1, codes_len do
local sc = codes[i]
-- Special case for "Hants", which is a special code that represents whichever of "Hant" or "Hans" best matches, or "Hani" if they match equally. This avoids having to list all three. In addition, "Hants" will be treated as the best match if there is at least one matching character, under the assumption that a Han script is desirable in terms that contain a mix of Han and other scripts (not counting those which use Jpan or Kore).
if sc == "Hants" then
local Hani = get_script("Hani")
-- Hant_chars/Hans_chars are lazily loaded lookup tables shared across calls (upvalues).
if not Hant_chars then
Hant_chars = load_data("Module:zh/data/ts")
Hans_chars = load_data("Module:zh/data/st")
end
-- t/s count characters usable in traditional/simplified text respectively.
local t, s, found = 0, 0
-- This is faster than using mw.ustring.gmatch directly.
-- \255 is inserted as a marker before each Han character so a byte-level gmatch can extract
-- the full UTF-8 sequence (lead byte plus continuation bytes \128-\191).
for ch in gmatch((ugsub(text, "[" .. Hani.characters .. "]", "\255%0")), "\255(.[\128-\191]*)") do
found = true
if Hant_chars[ch] then
t = t + 1
if Hans_chars[ch] then
s = s + 1
end
elseif Hans_chars[ch] then
s = s + 1
else
-- Character is in neither list: counts toward both, keeping the totals comparable.
t, s = t + 1, s + 1
end
end
if found then
-- Equal counts mean no preference between traditional and simplified: report generic Hani.
if t == s then
return Hani
end
return get_script(t > s and "Hant" or "Hans")
end
else
sc = get_script(sc)
-- ulen is only computed once, on demand, since it requires a full UTF-8 scan.
if not length then
length = ulen(text)
end
-- Count characters by removing everything in the script's charset and comparing to the original length.
local charset = sc.characters
local count = charset and length - ulen((ugsub(text, "[" .. charset .. "]+", ""))) or 0
-- Every character matched: this script wins outright.
if count >= length then
return sc
elseif count > bestcount then
bestcount = count
bestscript = sc
end
end
end
-- Return best matching script, or otherwise None.
return bestscript or get_script("None")
end
--[==[Returns a <code>Family</code> object for the language family that the language belongs to. See [[Module:families]]. Cached.]==]
function Language:getFamily()
	local cached = self._familyObject
	if cached ~= nil then
		return cached or nil
	end
	local code = self:getFamilyCode()
	-- A missing family is cached as false so it isn't recomputed on every call.
	local family = code and get_family(code) or false
	self._familyObject = family
	return family or nil
end
--[==[Returns the family code in the language's data file, or {nil} if there is none. Cached.]==]
function Language:getFamilyCode()
	local cached = self._familyCode
	if cached ~= nil then
		return cached or nil
	end
	-- A missing code is cached as false so the data lookup only happens once.
	local code = self._data[3] or false
	self._familyCode = code
	return code or nil
end
--[==[Returns the canonical name of the language's family, or {nil} if there is none. Cached.]==]
function Language:getFamilyName()
	local cached = self._familyName
	if cached ~= nil then
		return cached or nil
	end
	local family = self:getFamily()
	-- A missing family name is cached as false so it isn't recomputed on every call.
	local name = family and family:getCanonicalName() or false
	self._familyName = name
	return name or nil
end
do
-- Returns true if self belongs to `family` (a code or Family object): either directly, via the family
-- tree, or — when self's family is a pseudo-family like "qfa-not" (e.g. creoles) — via any of self's
-- ancestors.
local function check_family(self, family)
if type(family) == "table" then
family = family:getCode()
end
if self:getFamilyCode() == family then
return true
end
local self_family = self:getFamily()
if self_family:inFamily(family) then
return true
-- If the family isn't a real family (e.g. creoles) check any ancestors.
elseif self_family:inFamily("qfa-not") then
local ancestors = self:getAncestors()
for _, ancestor in ipairs(ancestors) do
if ancestor:inFamily(family) then
return true
end
end
end
end
--[==[Check whether the language belongs to `family` (which can be a family code or object). A list of objects can be given in place of `family`; in that case, return true if the language belongs to any of the specified families. Note that some languages (in particular, certain creoles) can have multiple immediate ancestors potentially belonging to different families; in that case, return true if the language belongs to any of the specified families.]==]
function Language:inFamily(...)
-- No family code at all means the language can't be in any family; this also guards check_family,
-- which assumes self:getFamily() is non-nil.
if self:getFamilyCode() == nil then
return false
end
return check_inputs(self, check_family, false, ...)
end
end
--[==[Returns the Language object for this language's parent, or {nil} if there is none. Cached.]==]
function Language:getParent()
	local cached = self._parentObject
	if cached ~= nil then
		return cached or nil
	end
	local code = self:getParentCode()
	-- A missing parent is cached as false so it isn't recomputed on every call.
	local parent = code and get_by_code(code, nil, true, true) or false
	self._parentObject = parent
	return parent or nil
end
--[==[Returns the parent code from the language's data file, or {nil} if there is none. Cached.]==]
function Language:getParentCode()
	local cached = self._parentCode
	if cached ~= nil then
		return cached or nil
	end
	-- A missing code is cached as false so the data lookup only happens once.
	local code = self._data.parent or false
	self._parentCode = code
	return code or nil
end
--[==[Returns the canonical name of this language's parent, or {nil} if there is none. Cached.]==]
function Language:getParentName()
	local cached = self._parentName
	if cached ~= nil then
		return cached or nil
	end
	local parent = self:getParent()
	-- A missing parent name is cached as false so it isn't recomputed on every call.
	local name = parent and parent:getCanonicalName() or false
	self._parentName = name
	return name or nil
end
--[==[Returns the chain of this language's parents, from the immediate parent upwards, as a list of
Language objects. Cached.]==]
function Language:getParentChain()
	if self._parentChain == nil then
		local chain = {}
		local parent = self:getParent()
		while parent do
			chain[#chain + 1] = parent
			parent = parent:getParent()
		end
		self._parentChain = chain
	end
	return self._parentChain
end
do
	-- True if `lang` (a code or Language object) appears anywhere in self's parent chain.
	local function check_lang(self, lang)
		local code = type(lang) == "string" and lang or lang:getCode()
		for _, parent in ipairs(self:getParentChain()) do
			if code == parent:getCode() then
				return true
			end
		end
	end
	--[==[Given a list of language codes or objects, returns true if at least one of them is a parent of this language (at any level).]==]
	function Language:hasParent(...)
		return check_inputs(self, check_lang, false, ...)
	end
end
--[==[
If the language is etymology-only, this iterates through parents until a full language or family is found, and the
corresponding object is returned. If the language is a full language, then it simply returns itself. Cached.
]==]
function Language:getFull()
	local full = self._fullObject
	if full == nil then
		local code = self:getFullCode()
		-- If the full code is our own code, we are already the full language.
		full = code == self._code and self or get_by_code(code)
		self._fullObject = full
	end
	return full
end
--[==[
If the language is an etymology-only language, this iterates through parents until a full language or family is
found, and the corresponding code is returned. If the language is a full language, then it simply returns the
language code.
]==]
-- NOTE(review): unlike the sibling getters, this relies on `self._fullCode` having been precomputed
-- elsewhere (presumably during object construction) — nothing is computed or cached here; confirm.
function Language:getFullCode()
return self._fullCode or self._code
end
--[==[
If the language is an etymology-only language, this iterates through parents until a full language or family is
found, and the corresponding canonical name is returned. If the language is a full language, then it simply returns
the canonical name of the language. Cached.
]==]
function Language:getFullName()
	if self._fullName == nil then
		self._fullName = self:getFull():getCanonicalName()
	end
	return self._fullName
end
--[==[Returns a table of <code class="nf">Language</code> objects for all languages that this language is directly descended from. Generally this is only a single language, but creoles, pidgins and mixed languages can have multiple ancestors.]==]
function Language:getAncestors()
local ancestors = self._ancestorObjects
if ancestors == nil then
ancestors = {}
local ancestor_codes = self:getAncestorCodes()
if #ancestor_codes > 0 then
-- Explicit ancestors listed in the data: resolve each code to an object.
for _, ancestor in ipairs(ancestor_codes) do
insert(ancestors, get_by_code(ancestor, nil, true))
end
else
-- No explicit ancestors: fall back to the proto-language of the family (or of an
-- enclosing family, walking upwards until one is found or the tree runs out).
local fam = self:getFamily()
local protoLang = fam and fam:getProtoLanguage() or nil
-- For the cases where the current language is the proto-language
-- of its family, or an etymology-only language that is ancestral to that
-- proto-language, we need to step up a level higher right from the
-- start.
if protoLang and (
protoLang:getCode() == self._code or
(self:hasType("etymology-only") and protoLang:hasAncestor(self))
) then
fam = fam:getFamily()
protoLang = fam and fam:getProtoLanguage() or nil
end
-- Keep climbing until a proto-language is found, or until the family tree ends or
-- reaches the "not a family" pseudo-family.
while not protoLang and not (not fam or fam:getCode() == "qfa-not") do
fam = fam:getFamily()
protoLang = fam and fam:getProtoLanguage() or nil
end
insert(ancestors, protoLang)
end
self._ancestorObjects = ancestors
end
return ancestors
end
do
	-- Avoid a language being its own ancestor via class inheritance. We only need to check for this if the
	-- language has inherited an ancestor table from its parent, because we never want to drop ancestors that
	-- have been explicitly set in the data.
	-- Recursively iterate over ancestors until we either find self or run out. If self is found, return true.
	local function check_ancestor(self, lang)
		local codes = lang:getAncestorCodes()
		if not codes then
			return nil
		end
		for i = 1, #codes do
			local code = codes[i]
			if code == self._code then
				return true
			end
			local anc = get_by_code(code, nil, true)
			if check_ancestor(self, anc) then
				return true
			end
		end
	end
	--[==[Returns a table of <code class="nf">Language</code> codes for all languages that this language is directly descended from. Generally this is only a single language, but creoles, pidgins and mixed languages can have multiple ancestors.]==]
	function Language:getAncestorCodes()
		if self._ancestorCodes then
			return self._ancestorCodes
		end
		local data = self._data
		local codes = data.ancestors
		if codes == nil then
			codes = {}
			self._ancestorCodes = codes
			return codes
		end
		codes = split(codes, ",", true, true)
		self._ancestorCodes = codes
		-- If there are no codes or the ancestors weren't inherited data, there's nothing left to check.
		if #codes == 0 or self:getData(false, "raw").ancestors ~= nil then
			return codes
		end
		local i = 1
		while i <= #codes do
			local code = codes[i]
			-- BUGFIX: the removal test must examine the ancestor currently at codes[i]. Previously the loop
			-- assigned `code = codes[i]` but never used it and called check_ancestor(self, self), so whether
			-- an entry was removed did not depend on the entry being inspected. Drop codes[i] when it is
			-- self's own code, or when following that ancestor's ancestry leads back to self (a cycle
			-- introduced by inheritance).
			if code == self._code or check_ancestor(self, get_by_code(code, nil, true)) then
				remove(codes, i)
			else
				i = i + 1
			end
		end
		return codes
	end
end
--[==[Given a list of language objects or codes, returns true if at least one of them is an ancestor. This includes any etymology-only children of that ancestor. If the language's ancestor(s) are etymology-only languages, it will also return true for those language parent(s) (e.g. if Vulgar Latin is the ancestor, it will also return true for its parent, Latin). However, a parent is excluded from this if the ancestor is also ancestral to that parent (e.g. if Classical Persian is the ancestor, Persian would return false, because Classical Persian is also ancestral to Persian).]==]
function Language:hasAncestor(...)
-- Depth-first walk over `node`'s ancestor tree, applying `func` to each ancestor (and, when
-- `parent_check` is set, to qualifying parents of ancestors). Returns func's first truthy result.
local function iterateOverAncestorTree(node, func, parent_check)
local ancestors = node:getAncestors()
local ancestorsParents = {}
for _, ancestor in ipairs(ancestors) do
-- When checking the parents of the other language, and the ancestor is also a parent, skip to the next ancestor, so that we exclude any etymology-only children of that parent that are not directly related (see below).
local ret = (parent_check or not node:hasParent(ancestor)) and
func(ancestor) or iterateOverAncestorTree(ancestor, func, parent_check)
if ret then
return ret
end
end
-- Check the parents of any ancestors. We don't do this if checking the parents of the other language, so that we exclude any etymology-only children of those parents that are not directly related (e.g. if the ancestor is Vulgar Latin and we are checking New Latin, we want it to return false because they are on different ancestral branches. As such, if we're already checking the parent of New Latin (Latin) we don't want to compare it to the parent of the ancestor (Latin), as this would be a false positive; it should be one or the other).
if not parent_check then
return nil
end
-- Collect qualifying parents of each ancestor, stopping at the first parent that is self or is
-- itself descended from the ancestor (see doc comment for the Classical Persian example).
for _, ancestor in ipairs(ancestors) do
local ancestorParents = ancestor:getParentChain()
for _, ancestorParent in ipairs(ancestorParents) do
if ancestorParent:getCode() == self._code or ancestorParent:hasAncestor(ancestor) then
break
else
insert(ancestorsParents, ancestorParent)
end
end
end
for _, ancestorParent in ipairs(ancestorsParents) do
local ret = func(ancestorParent)
if ret then
return ret
end
end
end
-- Tests one candidate `otherlang` (code or object), also climbing otherlang's parent chain; the
-- parent-of-ancestor check is only applied on the first (direct) comparison.
local function do_iteration(otherlang, parent_check)
-- otherlang can't be self
if (type(otherlang) == "string" and otherlang or otherlang:getCode()) == self._code then
return false
end
repeat
if iterateOverAncestorTree(
self,
function(ancestor)
return ancestor:getCode() == (type(otherlang) == "string" and otherlang or otherlang:getCode())
end,
parent_check
) then
return true
elseif type(otherlang) == "string" then
-- Resolve the code to an object so we can climb its parent chain below.
otherlang = get_by_code(otherlang, nil, true)
end
otherlang = otherlang:getParent()
parent_check = false
until not otherlang
end
local parent_check = true
for _, otherlang in ipairs{...} do
local ret = do_iteration(otherlang, parent_check)
if ret then
return true
end
end
return false
end
do
	-- Builds one node of the ancestor tree: {lang = <code>, ancestors = <list of nodes> or nil}.
	-- `memo` maps codes to nodes already built, so a shared ancestor is represented by a single node.
	local function construct_node(lang, memo)
		local code = lang:getCode()
		local node = {lang = code}
		memo[code] = node
		local children
		for _, ancestor in ipairs(lang:getAncestors()) do
			if children == nil then
				children = {}
			end
			insert(children, memo[ancestor:getCode()] or construct_node(ancestor, memo))
		end
		node.ancestors = children
		return node
	end
	--[==[Returns the language's full ancestor tree as nested tables of codes. Cached.]==]
	function Language:getAncestorChain()
		if self._ancestorChain == nil then
			self._ancestorChain = construct_node(self, {})
		end
		return self._ancestorChain
	end
end
--[==[Older, linear variant of {:getAncestorChain}: follows the line of single ancestors upwards and returns
them as a flat list of Language objects, stopping as soon as a language has zero or multiple ancestors. Cached.]==]
function Language:getAncestorChainOld()
	local chain = self._ancestorChainOld
	if chain == nil then
		chain = {}
		local step = self
		while true do
			local ancestors = step:getAncestors()
			-- Only follow the chain while it is unambiguous (exactly one ancestor).
			step = #ancestors == 1 and ancestors[1] or nil
			if not step then
				break
			end
			insert(chain, step)
		end
		-- BUGFIX: cache under a dedicated key. This previously read and wrote self._ancestorChain — the
		-- same field used by getAncestorChain() — so whichever method ran second returned the other's
		-- (differently shaped) cached value.
		self._ancestorChainOld = chain
	end
	return chain
end
-- Scans every known language, etymology-only language and family, collecting those that descend from
-- `self`. `fmt` selects what is collected for each match: "object", "code" or "name".
local function fetch_descendants(self, fmt)
	local family = self:getFamily()
	local descendants = {}
	-- Iterate over all three datasets.
	for _, dataset in ipairs{
		"Module:languages/code to canonical name",
		"Module:etymology languages/code to canonical name",
		"Module:families/code to canonical name",
	} do
		for code in pairs(require(dataset)) do
			local lang = get_by_code(code, nil, true, true)
			-- Cheap tests (not self; same family) weed out most candidates before the expensive
			-- hasDescendant/hasAncestor checks run.
			if code ~= self._code and lang:inFamily(family) and (
				family:getProtoLanguageCode() == self._code -- Self is the protolanguage.
				or self:hasDescendant(lang) -- Full hasDescendant check.
				or (lang:getFullCode() == self._code and not self:hasAncestor(lang)) -- Etymology-only child which isn't an ancestor.
			) then
				if fmt == "object" then
					insert(descendants, lang)
				elseif fmt == "code" then
					insert(descendants, code)
				elseif fmt == "name" then
					insert(descendants, lang:getCanonicalName())
				end
			end
		end
	end
	return descendants
end
--[==[Returns a list of Language objects for all descendants of this language. Cached.]==]
function Language:getDescendants()
	if self._descendantObjects == nil then
		self._descendantObjects = fetch_descendants(self, "object")
	end
	return self._descendantObjects
end
--[==[Returns a list of codes for all descendants of this language. Cached.]==]
function Language:getDescendantCodes()
	if self._descendantCodes == nil then
		self._descendantCodes = fetch_descendants(self, "code")
	end
	return self._descendantCodes
end
--[==[Returns a list of canonical names for all descendants of this language. Cached.]==]
function Language:getDescendantNames()
	if self._descendantNames == nil then
		self._descendantNames = fetch_descendants(self, "name")
	end
	return self._descendantNames
end
do
	-- True if `lang` (a code or Language object) has self among its ancestors, i.e. descends from self.
	local function check_lang(self, lang)
		if type(lang) == "string" then
			lang = get_by_code(lang, nil, true)
		end
		return lang:hasAncestor(self) or nil
	end
	--[==[Given a list of language codes or objects, returns true if at least one of them is a descendant of this language.]==]
	function Language:hasDescendant(...)
		return check_inputs(self, check_lang, false, ...)
	end
end
-- Scans the etymology-language data for every language whose parent chain leads to `self`.
-- `fmt` selects what is collected for each match: "object", "code" or "name".
local function fetch_children(self, fmt)
	local m_etym_data = require(etymology_languages_data_module)
	local self_code = self._code
	local children = {}
	for code, lang in pairs(m_etym_data) do
		-- Walk up the candidate's parent chain until we hit self or run out of data.
		local node = lang
		while node do
			local parent = node.parent
			if parent == self_code then
				if fmt == "object" then
					insert(children, get_by_code(code, nil, true))
				elseif fmt == "code" then
					insert(children, code)
				elseif fmt == "name" then
					insert(children, lang[1])
				end
				break
			end
			node = m_etym_data[parent]
		end
	end
	return children
end
--[==[Returns a list of Language objects for all etymology-only children of this language. Cached.]==]
function Language:getChildren()
	if self._childObjects == nil then
		self._childObjects = fetch_children(self, "object")
	end
	return self._childObjects
end
--[==[Returns a list of codes for all etymology-only children of this language. Cached.]==]
function Language:getChildrenCodes()
	if self._childCodes == nil then
		self._childCodes = fetch_children(self, "code")
	end
	return self._childCodes
end
--[==[Returns a list of canonical names for all etymology-only children of this language. Cached.]==]
function Language:getChildrenNames()
	if self._childNames == nil then
		self._childNames = fetch_children(self, "name")
	end
	return self._childNames
end
--[==[Given a list of language codes or objects, returns true if at least one of them is an etymology-only
child of this language (i.e. has this language in its parent chain).]==]
function Language:hasChild(...)
	local lang = ...
	-- Ran out of arguments without a match.
	if not lang then
		return false
	end
	if type(lang) == "string" then
		lang = get_by_code(lang, nil, true)
	end
	if lang:hasParent(self) then
		return true
	end
	-- Recurse over the remaining arguments.
	return self:hasChild(select(2, ...))
end
--[==[Returns the name of the main category of that language. Example: {{code|lua|"French language"}} for French, whose category is at [[:Category:French language]]. Unless optional argument <code>nocap</code> is given, the language name at the beginning of the returned value will be capitalized. This capitalization is correct for category names, but not if the language name is lowercase and the returned value of this function is used in the middle of a sentence.]==]
function Language:getCategoryName(nocap)
	local name = self._categoryName
	if name == nil then
		name = self:getCanonicalName()
		-- If a substrate, omit any leading article.
		if self:getFamilyCode() == "qfa-sub" then
			name = name:gsub("^the ", ""):gsub("^a ", "")
		end
		-- " language" is only appended for full languages, and only when the canonical name doesn't
		-- already end in "language", "lect" or a derivative (e.g. "dialect").
		if self:hasType("full") and not (match(name, "[Ll]anguage$") or match(name, "[Ll]ect$")) then
			name = name .. " language"
		end
		self._categoryName = name
	end
	if nocap then
		return name
	end
	return mw.getContentLanguage():ucfirst(name)
end
--[==[Creates a link to the category; the link text is the canonical name.]==]
function Language:makeCategoryLink()
	local target = ":Category:" .. self:getCategoryName()
	return make_link(self, target, self:getDisplayForm())
end
function Language:getStandardCharacters(sc)
local standard_chars = self._data.standard_chars
if type(standard_chars) ~= "table" then
return standard_chars
elseif sc and type(sc) ~= "string" then
check_object("script", nil, sc)
sc = sc:getCode()
end
if (not sc) or sc == "None" then
local scripts = {}
for _, script in pairs(standard_chars) do
insert(scripts, script)
end
return concat(scripts)
end
if standard_chars[sc] then
return standard_chars[sc] .. (standard_chars[1] or "")
end
end
--[==[
Strip diacritics from display text `text` (in a language-specific fashion), which is in the script `sc`. If `sc` is
omitted or {nil}, the script is autodetected. This also strips certain punctuation characters from the end and (in the
case of Spanish upside-down question mark and exclamation points) from the beginning; strips any whitespace at the
end of the text or between the text and final stripped punctuation characters; and applies some language-specific
Unicode normalizations to replace discouraged characters with their prescribed alternatives. Return the stripped text.
]==]
function Language:stripDiacritics(text, sc)
if (not text) or text == "" then
return text
end
sc = checkScript(text, self, sc)
text = normalize(text, sc)
-- FIXME, rename makeEntryName to stripDiacritics and get rid of second and third return values
-- everywhere
-- NOTE(review): the two discarded return values are assigned to `_`, which is not declared local in
-- this function — confirm `_` is a file-level local, otherwise this writes a global.
text, _, _ = iterateSectionSubstitutions(self, text, sc, nil, nil,
self._data.strip_diacritics or self._data.entry_name, "strip_diacritics", "stripDiacritics")
-- Strip an optional leading inverted question/exclamation mark, trailing sentence-final punctuation
-- (from several scripts) and surrounding whitespace; if the pattern fails to match (e.g. the text is
-- entirely spacing/punctuation), the text is left unchanged.
text = umatch(text, "^[¿¡]?(.-[^%s%p].-)%s*[؟?!;՛՜ ՞ ՟?!︖︕।॥။၊་།]?$") or text
return text
end
--[==[
Convert a ''logical'' pagename (the pagename as it appears to the user, after diacritics and punctuation have been
stripped) to a ''physical'' pagename (the pagename as it appears in the MediaWiki database). Reasons for a difference
between the two are (a) unsupported titles such as `[ ]` (with square brackets in them), `#` (pound/hash sign) and
`¯\_(ツ)_/¯` (with underscores), as well as overly long titles of various sorts; (b) "mammoth" pages that are split into
parts (e.g. `a`, which is split into physical pagenames `a/languages A to L` and `a/languages M to Z`). For almost all
purposes, you should work with logical and not physical pagenames. But there are certain use cases that require physical
pagenames, such as checking the existence of a page or retrieving a page's contents.
`pagename` is the logical pagename to be converted. `is_reconstructed_or_appendix` indicates whether the page is in the
`Reconstruction` or `Appendix` namespaces. If it is omitted or has the value {nil}, the pagename is checked for an
initial asterisk, and if found, the page is assumed to be a `Reconstruction` page. Setting a value of `false` or `true`
to `is_reconstructed_or_appendix` disables this check and allows for mainspace pagenames that begin with an asterisk.
]==]
function Language:logicalToPhysical(pagename, is_reconstructed_or_appendix)
-- FIXME: This probably shouldn't happen but it happens when makeEntryName() receives nil.
if pagename == nil then
track("nil-passed-to-logicalToPhysical")
return nil
end
local initial_asterisk
if is_reconstructed_or_appendix == nil then
-- A leading * marks a reconstruction; it is stripped here and re-attached to the final result.
local pagename_minus_initial_asterisk
initial_asterisk, pagename_minus_initial_asterisk = pagename:match("^(%*)(.*)$")
if pagename_minus_initial_asterisk then
is_reconstructed_or_appendix = true
pagename = pagename_minus_initial_asterisk
elseif self:hasType("appendix-constructed") then
is_reconstructed_or_appendix = true
end
end
if not is_reconstructed_or_appendix then
-- Check if the pagename is a listed unsupported title.
local unsupportedTitles = load_data(links_data_module).unsupported_titles
if unsupportedTitles[pagename] then
return "Unsupported titles/" .. unsupportedTitles[pagename]
end
end
-- Set `unsupported` as true if certain conditions are met.
local unsupported
-- Check if there's an unsupported character. \239\191\189 is the replacement character U+FFFD, which can't be typed
-- directly here due to an abuse filter. Unix-style dot-slash notation is also unsupported, as it is used for
-- relative paths in links, as are 3 or more consecutive tildes. Note: match is faster with magic
-- characters/charsets; find is faster with plaintext.
if (
match(pagename, "[#<>%[%]_{|}]") or
find(pagename, "\239\191\189") or
match(pagename, "%f[^%z/]%.%.?%f[%z/]") or
find(pagename, "~~~")
) then
unsupported = true
-- If it looks like an interwiki link.
elseif find(pagename, ":") then
-- Lowercase everything before the first colon and check it against the known namespace and
-- interwiki prefixes.
local prefix = gsub(pagename, "^:*(.-):.*", ulower)
if (
load_data("Module:data/namespaces")[prefix] or
load_data("Module:data/interwikis")[prefix]
) then
unsupported = true
end
end
-- Escape unsupported characters so they can be used in titles. ` is used as a delimiter for this, so a raw use of
-- it in an unsupported title is also escaped here to prevent interference; this is only done with unsupported
-- titles, though, so inclusion won't in itself mean a title is treated as unsupported (which is why it's excluded
-- from the earlier test).
if unsupported then
-- FIXME: This conversion needs to be different for reconstructed pages with unsupported characters. There
-- aren't any currently, but if there ever are, we need to fix this e.g. to put them in something like
-- Reconstruction:Proto-Indo-European/Unsupported titles/`lowbar``num`.
local unsupported_characters = load_data(links_data_module).unsupported_characters
pagename = pagename:gsub("[#<>%[%]_`{|}\239]\191?\189?", unsupported_characters)
:gsub("%f[^%z/]%.%.?%f[%z/]", function(m)
return (gsub(m, "%.", "`period`"))
end)
:gsub("~~~+", function(m)
return (gsub(m, "~", "`tilde`"))
end)
pagename = "Unsupported titles/" .. pagename
elseif not is_reconstructed_or_appendix then
-- Check if this is a mammoth page. If so, which subpage should we link to?
local m_links_data = load_data(links_data_module)
local mammoth_page_type = m_links_data.mammoth_pages[pagename]
if mammoth_page_type then
local canonical_name = self:getFullName()
if canonical_name ~= "Translingual" and canonical_name ~= "English" then
local this_subpage
local L2_sort_key = get_L2_sort_key(canonical_name)
-- Pick the first subpage whose pattern matches the language's L2 sort key; the data's
-- final entry is expected to use `true` as a catch-all.
for _, subpage_spec in ipairs(m_links_data.mammoth_page_subpage_types[mammoth_page_type]) do
-- unpack() fails utterly on data loaded using mw.loadData() even if offsets are given
local subpage, pattern = subpage_spec[1], subpage_spec[2]
if pattern == true or L2_sort_key:match(pattern) then
this_subpage = subpage
break
end
end
if not this_subpage then
error(("Internal error: Bad data in mammoth_page_subpage_pages in [[Module:links/data]] for mammoth page %s, type %s; last entry didn't have 'true' in it"):format(
pagename, mammoth_page_type))
end
pagename = pagename .. "/" .. this_subpage
end
end
end
return (initial_asterisk or "") .. pagename
end
--[==[
Strip the diacritics from a display pagename and convert the resulting logical pagename into a physical pagename.
This allows you, for example, to retrieve the contents of the page or check its existence. WARNING: This is deprecated
and will be going away. It is a simple composition of `self:stripDiacritics` and `self:logicalToPhysical`; most callers
only want the former, and if you need both, call them both yourself.
`text` and `sc` are as in `self:stripDiacritics`, and `is_reconstructed_or_appendix` is as in `self:logicalToPhysical`.
]==]
function Language:makeEntryName(text, sc, is_reconstructed_or_appendix)
	local stripped = self:stripDiacritics(text, sc)
	return self:logicalToPhysical(stripped, is_reconstructed_or_appendix)
end
--[==[Generates alternative forms using a specified method, and returns them as a table. If no method is specified, returns a table containing only the input term.]==]
function Language:generateForms(text, sc)
	-- Generates alternative forms of `text` using the module named by the
	-- language's `generate_forms` data setting; if the language has none,
	-- returns a table containing only the input term.
	local generate_forms = self._data.generate_forms
	if generate_forms == nil then
		return {text}
	end
	sc = checkScript(text, self, sc)
	-- Reuse the value captured above instead of re-reading self._data.
	return require("Module:" .. generate_forms).generateForms(text, self, sc)
end
--[==[Creates a sort key for the given stripped text, following the rules appropriate for the language. This removes
diacritical marks from the stripped text if they are not considered significant for sorting, and may perform some other
changes. Any initial hyphen is also removed, and anything in parentheses is removed as well.
The <code>sort_key</code> setting for each language in the data modules defines the replacements made by this function, or it gives the name of the module that takes the stripped text and returns a sortkey.]==]
function Language:makeSortKey(text, sc)
-- Nothing to do for nil or empty input.
if (not text) or text == "" then
return text
end
-- Temporary tracking: sort bases should not normally contain raw HTML.
if match(text, "<[^<>]+>") then
track("track HTML tag")
end
-- Remove directional characters, bold, italics, soft hyphens, strip markers and HTML tags.
-- FIXME: Partly duplicated with remove_formatting() in [[Module:links]].
text = ugsub(text, "[\194\173\226\128\170-\226\128\174\226\129\166-\226\129\169]", "")
text = text:gsub("('*)'''(.-'*)'''", "%1%2"):gsub("('*)''(.-'*)''", "%1%2")
text = gsub(unstrip(text), "<[^<>]+>", "")
-- Decode percent-encoding, then reject any remaining HTML entities.
text = decode_uri(text, "PATH")
text = checkNoEntities(self, text)
-- Remove initial hyphens and * unless the term only consists of spacing + punctuation characters.
text = ugsub(text, "^([-]*)[-־ـ᠊*]+([-]*)(.*[^%s%p].*)", "%1%2%3")
-- Determine/validate the script, then apply script-specific normalization
-- and caret removal.
sc = checkScript(text, self, sc)
text = normalize(text, sc)
text = removeCarets(text, sc)
-- For languages with dotted dotless i, ensure that "İ" is sorted as "i", and "I" is sorted as "ı".
if self:hasDottedDotlessI() then
text = gsub(text, "I\204\135", "i") -- decomposed "İ"
:gsub("I", "ı")
text = sc:toFixedNFD(text)
end
-- Convert to lowercase, make the sortkey, then convert to uppercase. Where the language has dotted dotless i, it is
-- usually not necessary to convert "i" to "İ" and "ı" to "I" first, because "I" will always be interpreted as
-- conventional "I" (not dotless "İ") by any sorting algorithms, which will have been taken into account by the
-- sortkey substitutions themselves. However, if no sortkey substitutions have been specified, then conversion is
-- necessary so as to prevent "i" and "ı" both being sorted as "I".
--
-- An exception is made for scripts that (sometimes) sort by scraping page content, as that means they are sensitive
-- to changes in capitalization (as it changes the target page).
if not sc:sortByScraping() then
text = ulower(text)
end
local actual_substitution_data
-- Don't trim whitespace here because it's significant at the beginning of a sort key or sort base.
text, _, actual_substitution_data = iterateSectionSubstitutions(self, text, sc, nil, nil, self._data.sort_key,
"sort_key", "makeSortKey", "notrim")
if not sc:sortByScraping() then
-- No substitutions were applied, so map i/ı back to İ/I by hand (see above).
if self:hasDottedDotlessI() and not actual_substitution_data then
text = text:gsub("ı", "I"):gsub("i", "İ")
text = sc:toFixedNFC(text)
end
text = uupper(text)
end
-- Remove parentheses, as long as they are either preceded or followed by something.
text = gsub(text, "(.)[()]+", "%1"):gsub("[()]+(.)", "%1")
text = escape_risky_characters(text)
return text
end
--[==[Create the form used as as a basis for display text and transliteration. FIXME: Rename to correctInputText().]==]
-- Prepares `text` for display/transliteration: applies the temporary PUA
-- substitutions, decodes percent-encoding, validates entities, normalizes for
-- the detected script, runs the language's display_text substitutions, removes
-- carets, and strips recognized interwiki prefixes. Returns the processed text
-- plus the substitution table needed by undoTempSubstitutions() later.
local function processDisplayText(text, self, sc, keepCarets, keepPrefixes)
local subbedChars = {}
text, subbedChars = doTempSubstitutions(text, subbedChars, keepCarets)
text = decode_uri(text, "PATH")
text = checkNoEntities(self, text)
sc = checkScript(text, self, sc)
text = normalize(text, sc)
text, subbedChars = iterateSectionSubstitutions(self, text, sc, subbedChars, keepCarets, self._data.display_text,
"display_text", "makeDisplayText")
text = removeCarets(text, sc)
-- Remove any interwiki link prefixes (unless they have been escaped or this has been disabled).
if find(text, ":") and not keepPrefixes then
local rep
-- Temporarily shield escaped sequences: "\\…\:" becomes \3-marked and a
-- lone "\:" becomes \4, so they aren't treated as prefix separators.
repeat
text, rep = gsub(text, "\\\\(\\*:)", "\3%1")
until rep == 0
text = gsub(text, "\\:", "\4")
-- Repeatedly strip recognized interwiki prefixes. PUA substitution
-- characters (the \244 sequences) are ignored when identifying the
-- prefix, but are preserved in the remaining text.
while true do
local prefix = gsub(text, "^(.-):.+", function(m1)
return (gsub(m1, "\244[\128-\191]*", ""))
end)
-- Check if the prefix is an interwiki, though ignore capitalised Wiktionary:, which is a namespace.
if not prefix or prefix == text or prefix == "Wiktionary"
or not (load_data("Module:data/interwikis")[ulower(prefix)] or prefix == "") then
break
end
text = gsub(text, "^(.-):(.*)", function(m1, m2)
local ret = {}
for subbedChar in gmatch(m1, "\244[\128-\191]*") do
insert(ret, subbedChar)
end
return concat(ret) .. m2
end)
end
-- Restore the shielded characters.
text = gsub(text, "\3", "\\"):gsub("\4", ":")
end
return text, subbedChars
end
--[==[Make the display text (i.e. what is displayed on the page).]==]
function Language:makeDisplayText(text, sc, keepPrefixes)
	-- Returns the form of `text` shown on the page. Nil/empty input is
	-- returned unchanged.
	if not text or text == "" then
		return text
	end
	local processed, subbedChars = processDisplayText(text, self, sc, nil, keepPrefixes)
	processed = escape_risky_characters(processed)
	return undoTempSubstitutions(processed, subbedChars)
end
--[==[Transliterates the text from the given script into the Latin script (see
[[Wiktionary:Transliteration and romanization]]). The language must have the <code>translit</code> property for this to
work; if it is not present, {{code|lua|nil}} is returned.
The <code>sc</code> parameter is handled by the transliteration module, and how it is handled is specific to that
module. Some transliteration modules may tolerate {{code|lua|nil}} as the script, others require it to be one of the
possible scripts that the module can transliterate, and will throw an error if it's not one of them. For this reason,
the <code>sc</code> parameter should always be provided when writing non-language-specific code.
The <code>module_override</code> parameter is used to override the default module that is used to provide the
transliteration. This is useful in cases where you need to demonstrate a particular module in use, but there is no
default module yet, or you want to demonstrate an alternative version of a transliteration module before making it
official. It should not be used in real modules or templates, only for testing. All uses of this parameter are tracked
by [[Wiktionary:Tracking/languages/module_override]].
'''Known bugs''':
* This function assumes {tr(s1) .. tr(s2) == tr(s1 .. s2)}. When this assertion fails, wikitext markups like <nowiki>'''</nowiki> can cause wrong transliterations.
* HTML entities like <code>&apos;</code>, often used to escape wikitext markups, do not work.
]==]
function Language:transliterate(text, sc, module_override)
-- If there is no text, or the language doesn't have transliteration data and there's no override, return nil.
if not text or text == "" or text == "-" then
return text
end
-- If the script is not transliteratable (and no override is given), return nil.
sc = checkScript(text, self, sc)
if not (sc:isTransliterated() or module_override) then
-- temporary tracking to see if/when this gets triggered
track("non-transliterable")
track("non-transliterable/" .. self._code)
track("non-transliterable/" .. sc:getCode())
track("non-transliterable/" .. sc:getCode() .. "/" .. self._code)
return nil
end
-- Remove any strip markers.
text = unstrip(text)
-- Do not process the formatting into PUA characters for certain languages.
local processed = load_data(languages_data_module).substitution[self._code] ~= "none"
-- Get the display text with the keepCarets flag set.
local subbedChars
if processed then
text, subbedChars = processDisplayText(text, self, sc, true)
end
-- Transliterate (using the module override if applicable).
text, subbedChars = iterateSectionSubstitutions(self, text, sc, subbedChars, true, module_override or
self._data.translit, "translit", "tr")
if not text then
return nil
end
-- Incomplete transliterations return nil.
local charset = sc.characters
if charset and umatch(text, "[" .. charset .. "]") then
-- Remove any characters in Latin, which includes Latin characters also included in other scripts (as these are
-- false positives), as well as any PUA substitutions. Anything remaining should only be script code "None"
-- (e.g. numerals).
local check_text = ugsub(text, "[" .. get_script("Latn").characters .. "-]+", "")
-- Set none_is_last_resort_only flag, so that any non-None chars will cause a script other than "None" to be
-- returned.
if find_best_script_without_lang(check_text, true):getCode() ~= "None" then
return nil
end
end
-- Undo the PUA substitutions only if they were applied above.
if processed then
text = escape_risky_characters(text)
text = undoTempSubstitutions(text, subbedChars)
end
-- If the script does not use capitalization, then capitalize any letters of the transliteration which are
-- immediately preceded by a caret (and remove the caret).
if text and not sc:hasCapitalization() and text:find("^", 1, true) then
text = processCarets(text, "%^([\128-\191\244]*%*?)([^\128-\191\244][\128-\191]*)", function(m1, m2)
return m1 .. uupper(m2)
end)
end
-- Track module overrides.
if module_override ~= nil then
track("module_override")
end
return text
end
do
-- Shared implementation for per-script boolean settings such as
-- `override_translit` and `link_tr`. The raw data value may be a boolean
-- (applies to every script) or a comma-separated string of script codes,
-- which is parsed into a set once and cached on the object under
-- "_<spec>". Returns a plain boolean for the script `sc`.
local function handle_language_spec(self, spec, sc)
local ret = self["_" .. spec]
if ret == nil then
ret = self._data[spec]
if type(ret) == "string" then
ret = list_to_set(split(ret, ",", true, true))
end
self["_" .. spec] = ret
end
if type(ret) == "table" then
ret = ret[sc:getCode()]
end
return not not ret
end
-- Returns whether the language's `override_translit` setting applies to script `sc`.
function Language:overrideManualTranslit(sc)
return handle_language_spec(self, "override_translit", sc)
end
-- Returns whether the language's `link_tr` setting applies to script `sc`.
function Language:link_tr(sc)
return handle_language_spec(self, "link_tr", sc)
end
end
--[==[Returns {{code|lua|true}} if the language has a transliteration module, or {{code|lua|false}} if it doesn't.]==]
function Language:hasTranslit()
	-- Coerce the `translit` data setting to a plain boolean.
	return self._data.translit and true or false
end
--[==[Returns {{code|lua|true}} if the language uses the letters I/ı and İ/i, or {{code|lua|false}} if it doesn't.]==]
function Language:hasDottedDotlessI()
	-- Coerce the `dotted_dotless_i` data setting to a plain boolean.
	return self._data.dotted_dotless_i and true or false
end
function Language:toJSON(opts)
	-- Serializes the language object. `opts` is optional; recognized fields:
	--   opts.skip_fields: set of property names to omit from the output.
	--   opts.lua_table: return a deep-copied plain table instead of JSON.
	-- Other fields are forwarded to the JSON encoder.
	local strip_diacritics, strip_diacritics_patterns, strip_diacritics_remove_diacritics = self._data.strip_diacritics
	if strip_diacritics then
		if strip_diacritics.from then
			strip_diacritics_patterns = {}
			for i, from in ipairs(strip_diacritics.from) do
				insert(strip_diacritics_patterns, {from = from, to = strip_diacritics.to[i] or ""})
			end
		end
		strip_diacritics_remove_diacritics = strip_diacritics.remove_diacritics
	end
	-- mainCode should only end up non-nil if dontCanonicalizeAliases is passed to make_object().
	-- props should either contain zero-argument functions to compute the value, or the value itself.
	local props = {
		ancestors = function() return self:getAncestorCodes() end,
		canonicalName = function() return self:getCanonicalName() end,
		categoryName = function() return self:getCategoryName("nocap") end,
		code = self._code,
		mainCode = self._mainCode,
		parent = function() return self:getParentCode() end,
		full = function() return self:getFullCode() end,
		stripDiacriticsPatterns = strip_diacritics_patterns,
		stripDiacriticsRemoveDiacritics = strip_diacritics_remove_diacritics,
		family = function() return self:getFamilyCode() end,
		aliases = function() return self:getAliases() end,
		varieties = function() return self:getVarieties() end,
		otherNames = function() return self:getOtherNames() end,
		scripts = function() return self:getScriptCodes() end,
		type = function() return keys_to_list(self:getTypes()) end,
		wikimediaLanguages = function() return self:getWikimediaLanguageCodes() end,
		wikidataItem = function() return self:getWikidataItem() end,
		wikipediaArticle = function() return self:getWikipediaArticle(true) end,
	}
	-- Fix: `opts` is optional (see the final return), so don't index it
	-- unconditionally when checking for skipped fields.
	local skip_fields = opts and opts.skip_fields
	local ret = {}
	for prop, val in pairs(props) do
		if not skip_fields or not skip_fields[prop] then
			if type(val) == "function" then
				ret[prop] = val()
			else
				ret[prop] = val
			end
		end
	end
	-- Use `deep_copy` when returning a table, so that there are no editing restrictions imposed by `mw.loadData`.
	return opts and opts.lua_table and deep_copy(ret) or to_json(ret, opts)
end
function export.getDataModuleName(code)
	-- Maps a language code to the data module that holds its entry:
	--   two lowercase letters   -> [[Module:languages/data/2]]
	--   three lowercase letters -> [[Module:languages/data/3/<first letter>]]
	--   anything else           -> [[Module:languages/data/exceptional]]
	local letter = match(code, "^(%l)%l%l?$")
	if letter == nil then
		return "Module:languages/data/exceptional"
	elseif #code == 2 then
		return "Module:languages/data/2"
	end
	return "Module:languages/data/3/" .. letter
end
get_data_module_name = export.getDataModuleName
function export.getExtraDataModuleName(code)
	-- Extra data always lives in a "/extra" submodule of the main data module.
	local base = get_data_module_name(code)
	return base .. "/extra"
end
get_extra_data_module_name = export.getExtraDataModuleName
do
-- Wraps `data` in a read-only "stack" of data layers, used to implement
-- inheritance of language data: the child layer sits at the highest index and
-- shadows parent layers below it, with optional extra data at key 0. The
-- function replaces itself on first call: this outer version builds the shared
-- metamethods once, then the inner `function make_stack` becomes the real
-- constructor. The `make_stack` function value itself doubles as a unique
-- sentinel key in each stack, storing the stack's length. Returns the proxy
-- table and its stack metatable.
local function make_stack(data)
-- How each key combines across layers; keys not listed here use simple
-- top-down shadowing (first non-nil value wins).
local key_types = {
[2] = "unique",
aliases = "unique",
otherNames = "unique",
type = "append",
varieties = "unique",
wikipedia_article = "unique",
wikimedia_codes = "unique"
}
local function __index(self, k)
local stack, key_type = getmetatable(self), key_types[k]
-- Data that isn't inherited from the parent.
if key_type == "unique" then
-- Only consult the top layer, falling back to the extra data at key 0.
local v = stack[stack[make_stack]][k]
if v == nil then
local layer = stack[0]
if layer then -- Could be false if there's no extra data.
v = layer[k]
end
end
return v
-- Data that is appended by each generation.
elseif key_type == "append" then
local parts, offset, n = {}, 0, stack[make_stack]
for i = 1, n do
local part = stack[i][k]
if part == nil then
offset = offset + 1
else
parts[i - offset] = part
end
end
-- nil if no layer supplied a value at all.
return offset ~= n and concat(parts, ",") or nil
end
-- Default: walk down from the top layer until a non-nil value is found.
local n = stack[make_stack]
while true do
local layer = stack[n]
if not layer then -- Could be false if there's no extra data.
return nil
end
local v = layer[k]
if v ~= nil then
return v
end
n = n - 1
end
end
local function __newindex()
error("table is read-only")
end
local function __pairs(self)
-- Iterate down the stack, caching keys to avoid duplicate returns.
local stack, seen = getmetatable(self), {}
local n = stack[make_stack]
local iter, state, k, v = pairs(stack[n])
return function()
repeat
repeat
k = iter(state, k)
if k == nil then
-- Current layer exhausted: move down to the next one.
n = n - 1
local layer = stack[n]
if not layer then -- Could be false if there's no extra data.
return nil
end
iter, state, k = pairs(layer)
end
until not (k == nil or seen[k])
-- Get the value via a lookup, as the one returned by the
-- iterator will be the raw value from the current layer,
-- which may not be the one __index will return for that
-- key. Also memoize the key in `seen` (even if the lookup
-- returns nil) so that it doesn't get looked up again.
-- TODO: store values in `self`, avoiding the need to create
-- the `seen` table. The iterator will need to iterate over
-- `self` with `next` first to find these on future loops.
v, seen[k] = self[k], true
until v ~= nil
return k, v
end
end
local __ipairs = require(table_module).indexIpairs
-- The real constructor, which overwrites the outer make_stack above.
function make_stack(data)
local stack = {
data,
[make_stack] = 1, -- stores the length and acts as a sentinel to confirm a given metatable is a stack.
__index = __index,
__newindex = __newindex,
__pairs = __pairs,
__ipairs = __ipairs,
}
-- Protect the metatable; getmetatable() on the proxy returns the stack itself.
stack.__metatable = stack
return setmetatable({}, stack), stack
end
return make_stack(data)
end
local function get_stack(data)
	-- Returns the stack metatable behind `data`, or nil if `data` is not
	-- backed by one. A stack sets __metatable to itself, so getmetatable()
	-- hands it straight back; the make_stack sentinel key confirms that the
	-- returned table really is a stack.
	local mt = getmetatable(data)
	if type(mt) == "table" and mt[make_stack] then
		return mt
	end
	return nil
end
--[==[
<span style="color: var(--wikt-palette-red,#BA0000)">This function is not for use in entries or other content pages.</span>
Returns a blob of data about the language. The format of this blob is undocumented, and perhaps unstable; it's intended for things like the module's own unit-tests, which are "close friends" with the module and will be kept up-to-date as the format changes. If `extra` is set, any extra data in the relevant `/extra` module will be included. (Note that it will be included anyway if it has already been loaded into the language object.) If `raw` is set, then the returned data will not contain any data inherited from parent objects.
-- Do NOT use these methods!
-- All uses should be pre-approved on the talk page!
]==]
function Language:getData(extra, raw)
-- Load the /extra data into the object first, if requested.
if extra then
self:loadInExtraData()
end
local data = self._data
-- If raw is not set, just return the data.
if not raw then
return data
end
local stack = get_stack(data)
-- If there isn't a stack or its length is 1, return the data. Extra data (if any) will be included, as it's stored at key 0 and doesn't affect the reported length.
if stack == nil then
return data
end
local n = stack[make_stack]
if n == 1 then
return data
end
-- NOTE: shadows the `extra` parameter, which is no longer needed here.
local extra = stack[0]
-- If there isn't any extra data, return the top layer of the stack.
if extra == nil then
return stack[n]
end
-- If there is, return a new stack which has the top layer at key 1 and the extra data at key 0.
data, stack = make_stack(stack[n])
stack[0] = extra
return data
end
-- Loads the language's /extra data module into the object's data stack (at
-- key 0), creating the stack if necessary. A no-op for non-full languages and
-- when the extra data has already been loaded.
function Language:loadInExtraData()
-- Only full languages have extra data.
if not self:hasType("language", "full") then
return
end
local data = self._data
-- If there's no stack, create one.
local stack = get_stack(self._data)
if stack == nil then
data, stack = make_stack(data)
-- If already loaded, return.
elseif stack[0] ~= nil then
return
end
self._data = data
-- Load extra data from the relevant module and add it to the stack at key 0, so that the __index and __pairs metamethods will pick it up, since they iterate down the stack until they run out of layers.
local code = self._code
local modulename = get_extra_data_module_name(code)
-- No data cached as false.
stack[0] = modulename and load_data(modulename)[code] or false
end
--[==[Returns the name of the module containing the language's data, e.g. [[Module:languages/data/2]]; for etymology-only languages, this is the etymology languages data module. The result is cached on the object.]==]
function Language:getDataModuleName()
	-- Computed lazily and memoized on the object.
	local name = self._dataModuleName
	if name ~= nil then
		return name
	end
	if self:hasType("etymology-only") then
		name = etymology_languages_data_module
	else
		name = get_data_module_name(self._mainCode or self._code)
	end
	self._dataModuleName = name
	return name
end
--[==[Returns the name of the module containing the language's extra data (the "/extra" submodule of its data module), or {{code|lua|nil}} for etymology-only languages, which have no extra data. The result is cached on the object.]==]
function Language:getExtraDataModuleName()
	-- Memoized on the object; `false` is cached to record "no extra data
	-- module" (etymology-only languages) and converted back to nil on return.
	local name = self._extraDataModuleName
	if name == nil then
		if self:hasType("etymology-only") then
			name = false
		else
			name = get_extra_data_module_name(self._mainCode or self._code)
		end
		self._extraDataModuleName = name
	end
	return name or nil
end
-- Constructs a Language object for `code` from `data`. If the data names a
-- parent, the parent object becomes the new object's metatable (inheriting
-- its methods) and `data` is pushed as a new top layer onto the parent's data
-- stack. If `dontCanonicalizeAliases` is set, the object keeps the input
-- (possibly alias) code as _code and records the canonical code as _mainCode.
function export.makeObject(code, data, dontCanonicalizeAliases)
local data_type = type(data)
if data_type ~= "table" then
error(("bad argument #2 to 'makeObject' (table expected, got %s)"):format(data_type))
end
-- Convert any aliases.
local input_code = code
code = normalize_code(code)
input_code = dontCanonicalizeAliases and input_code or code
local parent
if data.parent then
parent = get_by_code(data.parent, nil, true, true)
else
-- No parent in the data: inherit directly from the Language base class.
parent = Language
end
parent.__index = parent
local lang = {_code = input_code}
-- This can only happen if dontCanonicalizeAliases is passed to make_object().
if code ~= input_code then
lang._mainCode = code
end
local parent_data = parent._data
if parent_data == nil then
-- Full code is the same as the code.
lang._fullCode = parent._code or code
else
-- Copy full code.
lang._fullCode = parent._fullCode
local stack = get_stack(parent_data)
if stack == nil then
parent_data, stack = make_stack(parent_data)
end
-- Insert the input data as the new top layer of the stack.
local n = stack[make_stack] + 1
data, stack[n], stack[make_stack] = parent_data, data, n
end
lang._data = data
return setmetatable(lang, parent)
end
make_object = export.makeObject
end
--[==[Finds the language whose code matches the one provided. If it exists, it returns a <code class="nf">Language</code> object representing the language. Otherwise, it returns {{code|lua|nil}}, unless <code class="n">paramForError</code> is given, in which case an error is generated. If <code class="n">paramForError</code> is {{code|lua|true}}, a generic error message mentioning the bad code is generated; otherwise <code class="n">paramForError</code> should be a string or number specifying the parameter that the code came from, and this parameter will be mentioned in the error message along with the bad code. If <code class="n">allowEtymLang</code> is specified, etymology-only language codes are allowed and looked up along with normal language codes. If <code class="n">allowFamily</code> is specified, language family codes are allowed and looked up along with normal language codes.]==]
function export.getByCode(code, paramForError, allowEtymLang, allowFamily)
-- Track uses of paramForError, ultimately so it can be removed, as error-handling should be done by [[Module:parameters]], not here.
if paramForError ~= nil then
track("paramForError")
end
-- Reject non-string input with a descriptive message (callers sometimes
-- pass an already-constructed language/family object by mistake).
if type(code) ~= "string" then
local typ
if not code then
typ = "nil"
elseif check_object("language", true, code) then
typ = "a language object"
elseif check_object("family", true, code) then
typ = "a family object"
else
typ = "a " .. type(code)
end
error("The function getByCode expects a string as its first argument, but received " .. typ .. ".")
end
-- Track lookups of aliases and specially-tracked codes.
local m_data = load_data(languages_data_module)
if m_data.aliases[code] or m_data.track[code] then
track(code)
end
local norm_code = normalize_code(code)
-- Get the data, checking for etymology-only languages if allowEtymLang is set.
local data = load_data(get_data_module_name(norm_code))[norm_code] or
allowEtymLang and load_data(etymology_languages_data_module)[norm_code]
-- If no data was found and allowFamily is set, check the family data. If the main family data was found, make the object with [[Module:families]] instead, as family objects have different methods. However, if it's an etymology-only family, use make_object in this module (which handles object inheritance), and the family-specific methods will be inherited from the parent object.
if data == nil and allowFamily then
data = load_data("Module:families/data")[norm_code]
if data ~= nil then
if data.parent == nil then
return make_family_object(norm_code, data)
elseif not allowEtymLang then
data = nil
end
end
end
local retval = code and data and make_object(code, data)
-- Only load the (expensive) error module when an error is actually needed.
if not retval and paramForError then
require("Module:languages/errorGetBy").code(code, paramForError, allowEtymLang, allowFamily)
end
return retval
end
get_by_code = export.getByCode
--[==[Finds the language whose canonical name (the name used to represent that language on Wiktionary) or other name matches the one provided. If it exists, it returns a <code class="nf">Language</code> object representing the language. Otherwise, it returns {{code|lua|nil}}, unless <code class="n">paramForError</code> is given, in which case an error is generated. If <code class="n">allowEtymLang</code> is specified, etymology-only language codes are allowed and looked up along with normal language codes. If <code class="n">allowFamily</code> is specified, language family codes are allowed and looked up along with normal language codes.
The canonical name of languages should always be unique (it is an error for two languages on Wiktionary to share the same canonical name), so this is guaranteed to give at most one result.
This function is powered by [[Module:languages/canonical names]], which contains a pre-generated mapping of full-language canonical names to codes. It is generated by going through the [[:Category:Language data modules]] for full languages. When <code class="n">allowEtymLang</code> is specified for the above function, [[Module:etymology languages/canonical names]] may also be used, and when <code class="n">allowFamily</code> is specified for the above function, [[Module:families/canonical names]] may also be used.]==]
function export.getByCanonicalName(name, errorIfInvalid, allowEtymLang, allowFamily)
local byName = load_data("Module:languages/canonical names")
local code = byName and byName[name]
if not code and allowEtymLang then
-- Etymology-only names get extra fuzzy matching: try stripping a
-- leading "a " and/or a trailing " substrate"/" Substrate", and a
-- trailing " languages" (for etymology families).
byName = load_data("Module:etymology languages/canonical names")
code = byName and byName[name] or
byName[gsub(name, " [Ss]ubstrate$", "")] or
byName[gsub(name, "^a ", "")] or
byName[gsub(name, "^a ", ""):gsub(" [Ss]ubstrate$", "")] or
-- For etymology families like "ira-pro".
-- FIXME: This is not ideal, as it allows " languages" to be appended to any etymology-only language, too.
byName[match(name, "^(.*) languages$")]
end
if not code and allowFamily then
byName = load_data("Module:families/canonical names")
code = byName[name] or byName[match(name, "^(.*) languages$")]
end
-- Resolve the found code into a full object (or nil).
local retval = code and get_by_code(code, errorIfInvalid, allowEtymLang, allowFamily)
if not retval and errorIfInvalid then
require("Module:languages/errorGetBy").canonicalName(name, allowEtymLang, allowFamily)
end
return retval
end
--[==[Used by [[Module:languages/data/2]] (et al.) and [[Module:etymology languages/data]], [[Module:families/data]], [[Module:scripts/data]] and [[Module:writing systems/data]] to finalize the data into the format that is actually returned.]==]
function export.finalizeData(data, main_type, variety)
	-- Normalizes raw data-module entries in place and returns `data`.
	-- `fields` lists the keys holding comma-separated list strings whose
	-- surrounding whitespace should be collapsed.
	local fields = {"type"}
	if main_type == "language" then
		insert(fields, 4) -- script codes
		insert(fields, "ancestors")
		insert(fields, "link_tr")
		insert(fields, "override_translit")
		insert(fields, "wikimedia_codes")
	elseif main_type == "script" then
		insert(fields, 3) -- writing system codes
	end -- Families and writing systems have no extra fields to process.
	local fields_len = #fields
	for _, entity in next, data do
		if variety then
			-- Move parent from 3 to "parent" and family from "family" to 3 (clearing "family"). These are different for the sake of convenience, since very few varieties have the family specified, whereas all of them have a parent.
			entity.parent, entity[3], entity.family = entity[3], entity.family
		-- Give the type "regular" iff not a variety and no other types are assigned.
		elseif not (entity.type or entity.parent) then
			entity.type = "regular"
		end
		for i = 1, fields_len do
			local key = fields[i]
			local field = entity[key]
			-- type(field) == "string" already excludes nil, so no separate
			-- truthiness check is needed.
			if type(field) == "string" then
				entity[key] = gsub(field, "%s*,%s*", ",")
			end
		end
	end
	return data
end
--[==[For backwards compatibility only; modules should require the error themselves.]==]
function export.err(lang_code, param, code_desc, template_tag, not_real_lang)
	-- Deprecated shim: forwards straight to [[Module:languages/error]].
	local raise = require("Module:languages/error")
	return raise(lang_code, param, code_desc, template_tag, not_real_lang)
end
return export
hen6s6wbv7hthor9k3teh6x7me72099
মডিউল:script utilities
828
6290
507796
324003
2026-04-14T07:01:33Z
Redmin
6857
[[en:Module:script utilities|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507796
Scribunto
text/plain
local export = {}
local anchors_module = "Module:anchors"
local debug_track_module = "Module:debug/track"
local links_module = "Module:links"
local munge_text_module = "Module:munge text"
local parameters_module = "Module:parameters"
local scripts_module = "Module:scripts"
local string_utilities_module = "Module:string utilities"
local utilities_module = "Module:utilities"
local concat = table.concat
local insert = table.insert
local require = require
local toNFD = mw.ustring.toNFD
local dump = mw.dumpObject
--[==[
Loaders for functions in other modules, which overwrite themselves with the target function when called. This ensures modules are only loaded when needed, retains the speed/convenience of locally-declared pre-loaded functions, and has no overhead after the first call, since the target functions are called directly in any subsequent calls.]==]
-- Each loader below requires its target on first use, then overwrites its own
-- upvalue so subsequent calls go straight to the real function.
local function embedded_language_links(...)
	local fn = require(links_module).embedded_language_links
	embedded_language_links = fn
	return fn(...)
end
local function find_best_script_without_lang(...)
	local fn = require(scripts_module).findBestScriptWithoutLang
	find_best_script_without_lang = fn
	return fn(...)
end
local function format_categories(...)
	local fn = require(utilities_module).format_categories
	format_categories = fn
	return fn(...)
end
local function get_script(...)
	local fn = require(scripts_module).getByCode
	get_script = fn
	return fn(...)
end
local function language_anchor(...)
	local fn = require(anchors_module).language_anchor
	language_anchor = fn
	return fn(...)
end
local function munge_text(...)
	local fn = require(munge_text_module)
	munge_text = fn
	return fn(...)
end
local function process_params(...)
	local fn = require(parameters_module).process
	process_params = fn
	return fn(...)
end
local function track(...)
	local fn = require(debug_track_module)
	track = fn
	return fn(...)
end
local function u(...)
	local fn = require(string_utilities_module).char
	u = fn
	return fn(...)
end
local function ugsub(...)
	local fn = require(string_utilities_module).gsub
	ugsub = fn
	return fn(...)
end
local function umatch(...)
	local fn = require(string_utilities_module).match
	umatch = fn
	return fn(...)
end
--[==[
Loaders for objects, which load data (or some other object) into some variable, which can then be accessed as "foo or get_foo()", where the function get_foo sets the object to "foo" and then returns it. This ensures they are only loaded when needed, and avoids the need to check for the existence of the object each time, since once "foo" has been set, "get_foo" will not be called again.]==]
local m_data
local function get_data()
	-- One-shot loader: cache the data table, then clear the loader so the
	-- callers' "m_data or get_data()" pattern never calls it again.
	m_data = mw.loadData("Module:script utilities/data")
	get_data = nil
	return m_data
end
--[=[
Modules used:
[[Module:script utilities/data]]
[[Module:scripts]]
[[Module:anchors]] (only when IDs present)
[[Module:string utilities]] (only when hyphens in Korean text or spaces in vertical text)
[[Module:languages]]
[[Module:parameters]]
[[Module:utilities]]
[[Module:debug/track]]
]=]
function export.is_Latin_script(sc)
	-- Matches "Latn" plus variants such as Latf, Latg and pjt-Latn.
	if sc:getCode():find("Lat") then
		return true
	end
	return false
end
--[==[{{temp|#invoke:script utilities|lang_t}}
This is used by {{temp|lang}} to wrap portions of text in a language tag. See there for more information.]==]
do
-- Parses the parent frame's template arguments for {{lang}}:
-- 1 = language code (defaults to "und"), 2 = text, sc/face/class optional.
local function get_args(frame)
return process_params(frame:getParent().args, {
[1] = {required = true, type = "language", default = "und"},
[2] = {required = true, allow_empty = true, default = ""},
["sc"] = {type = "script"},
["face"] = true,
["class"] = true,
})
end
-- Template entry point: wraps the text in a language/script tag, adding
-- tracking categories for redundant or non-redundant manual script codes.
function export.lang_t(frame)
local args = get_args(frame)
local lang = args[1]
local sc = args["sc"]
local text = args[2]
local cats = {}
if sc then
-- Track uses of sc parameter.
if sc:getCode() == lang:findBestScript(text):getCode() then
insert(cats, lang:getFullName() .. " terms with redundant script codes")
else
insert(cats, lang:getFullName() .. " terms with non-redundant manual script codes")
end
else
-- No explicit script: detect the best one for the text.
sc = lang:findBestScript(text)
end
text = embedded_language_links{
term = text,
lang = lang,
sc = sc
}
cats = #cats > 0 and format_categories(cats, lang, "-", nil, nil, sc) or ""
local face = args["face"]
local class = args["class"]
return export.tag_text(text, lang, sc, face, class) .. cats
end
end
-- Ustring turns on the codepoint-aware string matching. The basic string function
-- should be used for simple sequences of characters, Ustring function for
-- sets – [].
local function trackPattern(text, pattern, tracking)
	-- Fires a "script/<tracking>" hit when the ustring pattern matches.
	-- A nil/false pattern is a no-op.
	if not pattern then
		return
	end
	if umatch(text, pattern) then
		track("script/" .. tracking)
	end
end
-- Adds tracking hits for language-specific character-level problems in `text`
-- (wrong lookalike characters, decomposed diacritics, broken vowel sequences,
-- etc.), dispatching on the language's full code. `sc` is currently unused here.
local function track_text(text, lang, sc)
if lang and text then
local langCode = lang:getFullCode()
-- [[Special:WhatLinksHere/Wiktionary:Tracking/script/ang/acute]]
if langCode == "ang" then
local decomposed = toNFD(text)
local acute = u(0x301)
trackPattern(decomposed, acute, "ang/acute")
--[=[
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Greek/wrong-phi]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Greek/wrong-theta]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Greek/wrong-kappa]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Greek/wrong-rho]]
ϑ, ϰ, ϱ, ϕ should generally be replaced with θ, κ, ρ, φ.
]=]
elseif langCode == "el" or langCode == "grc" then
trackPattern(text, "ϑ", "Greek/wrong-theta")
trackPattern(text, "ϰ", "Greek/wrong-kappa")
trackPattern(text, "ϱ", "Greek/wrong-rho")
trackPattern(text, "ϕ", "Greek/wrong-phi")
--[=[
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Ancient Greek/spacing-coronis]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Ancient Greek/spacing-smooth-breathing]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Ancient Greek/wrong-apostrophe]]
When spacing coronis and spacing smooth breathing are used as apostrophes,
they should be replaced with right single quotation marks (’).
]=]
if langCode == "grc" then
trackPattern(text, u(0x1FBD), "Ancient Greek/spacing-coronis")
trackPattern(text, u(0x1FBF), "Ancient Greek/spacing-smooth-breathing")
-- NOTE(review): trackPattern takes only 3 parameters; the trailing
-- `true` argument here is ignored.
trackPattern(text, "[" .. u(0x1FBD) .. u(0x1FBF) .. "]", "Ancient Greek/wrong-apostrophe", true)
end
-- [[Special:WhatLinksHere/Wiktionary:Tracking/script/Russian/grave-accent]]
elseif langCode == "ru" then
local decomposed = toNFD(text)
trackPattern(decomposed, u(0x300), "Russian/grave-accent")
-- [[Special:WhatLinksHere/Wiktionary:Tracking/script/Chuvash/latin-homoglyph]]
elseif langCode == "cv" then
trackPattern(text, "[ĂăĔĕÇçŸÿ]", "Chuvash/latin-homoglyph")
-- [[Special:WhatLinksHere/Wiktionary:Tracking/script/Tibetan/trailing-punctuation]]
elseif langCode == "bo" then
trackPattern(text, "[་།]$", "Tibetan/trailing-punctuation")
trackPattern(text, "[་།]%]%]$", "Tibetan/trailing-punctuation")
--[=[
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Thai/broken-ae]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Thai/broken-am]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Thai/wrong-rue-lue]]
]=]
elseif langCode == "th" then
trackPattern(text, "เ".."เ", "Thai/broken-ae")
trackPattern(text, "ํ[่้๊๋]?า", "Thai/broken-am")
trackPattern(text, "[ฤฦ]า", "Thai/wrong-rue-lue")
--[=[
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lao/broken-ae]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lao/broken-am]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lao/possible-broken-ho-no]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lao/possible-broken-ho-mo]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lao/possible-broken-ho-lo]]
]=]
elseif langCode == "lo" then
trackPattern(text, "ເ".."ເ", "Lao/broken-ae")
trackPattern(text, "ໍ[່້໊໋]?າ", "Lao/broken-am")
trackPattern(text, "ຫນ", "Lao/possible-broken-ho-no")
trackPattern(text, "ຫມ", "Lao/possible-broken-ho-mo")
trackPattern(text, "ຫລ", "Lao/possible-broken-ho-lo")
--[=[
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lü/broken-ae]]
[[Special:WhatLinksHere/Wiktionary:Tracking/script/Lü/possible-wrong-sequence]]
]=]
elseif langCode == "khb" then
trackPattern(text, "ᦵ".."ᦵ", "Lü/broken-ae")
trackPattern(text, "[ᦀ-ᦫ][ᦵᦶᦷᦺ]", "Lü/possible-wrong-sequence")
end
end
end
-- Wraps Korean mixed-script text in HTML <ruby> markup: a run of Han
-- characters followed by a parenthesized run of Hangul becomes ruby base +
-- reading. Character sets are fetched from [[Module:scripts]] once, on the
-- first call; the function then replaces itself with a closure over them.
local function Kore_ruby(...)
-- Cache character sets on the first call.
local Hang_chars = get_script("Hang"):getCharacters()
local Hani_chars = get_script("Hani"):getCharacters()
-- Overwrite with the actual function, which is called directly on subsequent calls.
function Kore_ruby(txt)
return (ugsub(txt, "([%-".. Hani_chars .. "]+)%(([%-" .. Hang_chars .. "]+)%)", "<ruby>%1<rp>(</rp><rt>%2</rt><rp>)</rp></ruby>"))
end
return Kore_ruby(...)
end
--[==[Wraps the given text in HTML tags with appropriate CSS classes (see [[WT:CSS]]) for the [[Module:languages#Language objects|language]] and script. This is required for all non-English text on Wiktionary.
The actual tags and CSS classes that are added are determined by the <code>face</code> parameter. It can be one of the following:
; {{code|lua|"term"}}
: The text is wrapped in {{code|html|2=<i class="(sc) mention" lang="(lang)">...</i>}}.
; {{code|lua|"head"}}
: The text is wrapped in {{code|html|2=<strong class="(sc) headword" lang="(lang)">...</strong>}}.
; {{code|lua|"hypothetical"}}
: The text is wrapped in {{code|html|2=<span class="hypothetical-star">*</span><i class="(sc) hypothetical" lang="(lang)">...</i>}}.
; {{code|lua|"bold"}}
: The text is wrapped in {{code|html|2=<b class="(sc)" lang="(lang)">...</b>}}.
; {{code|lua|nil}}
: The text is wrapped in {{code|html|2=<span class="(sc)" lang="(lang)">...</span>}}.
The optional <code>class</code> parameter can be used to specify an additional CSS class to be added to the tag.]==]
function export.tag_text(text, lang, sc, face, class, id)
-- Determine the script automatically when the caller did not supply one.
if not sc then
if lang then
sc = lang:findBestScript(text)
else
sc = find_best_script_without_lang(text)
end
end
-- Add tracking templates for suspicious text (see the per-language patterns above).
track_text(text, lang, sc)
-- Replace space characters with newlines in Mongolian-script text, which is written top-to-bottom.
if sc:getDirection():find("vertical", nil, true) and text:find(" ", nil, true) then
text = munge_text(text, function(txt)
-- having extra parentheses makes sure only the first return value gets through
return (txt:gsub(" +", "<br>"))
end)
end
-- Hack Korean script text to remove hyphens.
-- FIXME: This should be handled in a more general fashion, but needs to
-- be efficient by not doing anything if no hyphens are present, and currently this is the only
-- language needing such processing.
-- 20220221: Also convert 漢字(한자) to ruby, instead of needing [[Template:Ruby]].
if sc:getCode() == "Kore" and text:match("[%-()g]") then
local title, display = require("Module:links").get_wikilink_parts(text, true)
if title ~= nil then -- special case that the text is a single link, do not munge and preserve affix hyphens
if lang and lang:getCode() == "okm" then -- Middle Korean code from [[User:Chom.kwoy]]
-- Comment from [[User:Lunabunn]]:
-- In Middle Korean orthography, syllable formation is phonemic as opposed to morpheme-boundary-based a la
-- modern Korean. As such, for example, if you were to write nam-i, it would be rendered as na.mi so if you
-- then put na-mi to indicate particle boundaries as in modern Korean, the hyphen would be misplaced.
-- Previously, this was alleviated by specialcasing na--mi but [[User:Theknightwho]] made that resolve to -
-- in the Hangul (previously we used to just delete all -s in Hangul processing), so it broke.
-- [[User:Chom.kwoy]] implemented a different solution, which is writing -> instead using however many >s to
-- shift the hyphen by that number of letters in the romanization.
-- By the time we are called, > signs have been converted to > by a call to encode_entities() in
-- make_link() in [[Module:links]] (near the bottom of the function).
-- 'g' in Middle Korean is a special sign to treat the following ㅇ sign as /G/ instead of null.
display = display:gsub(">", ""):gsub("g", "")
end
-- Strip hyphens (collapsing "--" to "-") and ruby-fy only the display
-- text, leaving the link target untouched.
if display:find("<") then
display = munge_text(display, function(txt)
txt = txt:gsub("(.)%-(%-?)(.)", "%1%2%3")
return Kore_ruby(txt)
end)
else
display = display:gsub("(.)%-(%-?)(.)", "%1%2%3")
display = Kore_ruby(display)
end
text = "[[" .. title .. "|" .. display .. "]]"
else
-- Not a single bare wikilink: munge the plain-text portions of the text.
text = munge_text(text, function(txt)
if lang and lang:getCode() == "okm" then
txt = txt:gsub(">", ""):gsub("g", "")
end
if txt == text then -- special case for the entire text being plain
txt = txt:gsub("(.)%-(%-?)(.)", "%1%2%3")
else
txt = txt:gsub("%-(%-?)", "%1")
end
return Kore_ruby(txt)
end)
end
end
-- The special "Image" pseudo-script never gets face styling.
if sc:getCode() == "Image" then
face = nil
end
if face == "hypothetical" then
-- [[Special:WhatLinksHere/Wiktionary:Tracking/script-utilities/face/hypothetical]]
track("script-utilities/face/hypothetical")
end
-- Look up the tag/class data for the requested face (see [[Module:script utilities/data]]).
local data = (m_data or get_data()).faces[face or "plain"]
if data == nil then
error('Invalid script face "' .. face .. '".')
end
local tag = data.tag
local opening_tag = {tag}
-- Add an anchor id when both a language and a sense id were supplied.
if lang and id then
insert(opening_tag, 'id="' .. language_anchor(lang, id) .. '"')
end
local classes = {data.class}
-- if the script code is hyphenated (i.e. language code-script code), add the last component as a class as well
-- e.g. ota-Arab adds both Arab and ota-Arab as classes
if sc:getCode():find("-", nil, true) then
insert(classes, 1, (ugsub(sc:getCode(), ".+%-", "")))
insert(classes, 2, sc:getCode())
else
insert(classes, 1, sc:getCode())
end
if class and class ~= '' then
insert(classes, class)
end
insert(opening_tag, 'class="' .. concat(classes, ' ') .. '"')
-- FIXME: Is it OK to insert the etymology-only lang code and have it fall back to the first part of the
-- lang code (by chopping off the '-...' part)? It seems the :lang() selector does this; not sure about
-- [lang=...] attributes.
if lang then
insert(opening_tag, 'lang="' .. lang:getFullCode() .. '"')
end
-- Add a script wrapper
return (data.prefix or "") .. "<" .. concat(opening_tag, " ") .. ">" .. text .. "</" .. tag .. ">"
end
--[==[Tags the transliteration for given text {translit} and language {lang}. It will add the language, script subtag (as defined in [https://www.rfc-editor.org/rfc/bcp/bcp47.txt BCP 47 2.2.3]) and [https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/dir dir] (directional) attributes as needed.
The optional <code>kind</code> parameter can be one of the following:
; {{code|lua|"term"}}
: tag transliteration for {{temp|mention}}
; {{code|lua|"usex"}}
: tag transliteration for {{temp|usex}}
; {{code|lua|"head"}}
: tag transliteration for {{temp|head}}
; {{code|lua|"default"}}
: default
The optional <code>attributes</code> parameter is used to specify additional HTML attributes for the tag.]==]
function export.tag_translit(translit, lang, kind, attributes, is_manual)
	if type(lang) == "table" then
		-- FIXME: Do better support for etym languages; see https://www.rfc-editor.org/rfc/bcp/bcp47.txt
		lang = lang.getFullCode and lang:getFullCode()
			or error("Second argument to tag_translit should be a language code or language object.")
	end
	-- Tag/class/dir data for this kind of transliteration, from the data module.
	local info = (m_data or get_data()).translit[kind or "default"]
	local tag_name = info.tag
	local attrs = {tag_name}
	local base_class = info.class and (info.class .. " ") or ""
	local manual_class = is_manual and "manual-tr " or ""
	-- Japanese transliterations get no lang attribute; all others are tagged
	-- as the Latin-script form of the language.
	if lang == "ja" then
		attrs[#attrs + 1] = 'class="' .. base_class .. manual_class .. 'tr"'
	else
		attrs[#attrs + 1] = 'lang="' .. lang .. '-Latn"'
		attrs[#attrs + 1] = 'class="' .. base_class .. manual_class .. 'tr Latn"'
	end
	if info.dir then
		attrs[#attrs + 1] = 'dir="' .. info.dir .. '"'
	end
	-- Raw extra attribute text; tracked so uses can be found and cleaned up.
	if attributes then
		track("tag_translit/attributes")
		attrs[#attrs + 1] = attributes
	end
	return "<" .. concat(attrs, " ") .. ">" .. translit .. "</" .. tag_name .. ">"
end
--[==[Tags a phonetic transcription for the given language, analogously to {tag_translit}
but using the {ts} class and with no manual-transcription marker.]==]
function export.tag_transcription(transcription, lang, kind, attributes)
	if type(lang) == "table" then
		-- FIXME: Do better support for etym languages; see https://www.rfc-editor.org/rfc/bcp/bcp47.txt
		lang = lang.getFullCode and lang:getFullCode()
			or error("Second argument to tag_transcription should be a language code or language object.")
	end
	-- Tag/class/dir data for this kind of transcription, from the data module.
	local info = (m_data or get_data()).transcription[kind or "default"]
	local tag_name = info.tag
	local attrs = {tag_name}
	local base_class = info.class and (info.class .. " ") or ""
	-- Japanese transcriptions get no lang attribute; all others are tagged
	-- as the Latin-script form of the language.
	if lang == "ja" then
		attrs[#attrs + 1] = 'class="' .. base_class .. 'ts"'
	else
		attrs[#attrs + 1] = 'lang="' .. lang .. '-Latn"'
		attrs[#attrs + 1] = 'class="' .. base_class .. 'ts Latn"'
	end
	if info.dir then
		attrs[#attrs + 1] = 'dir="' .. info.dir .. '"'
	end
	-- Raw extra attribute text; tracked so uses can be found and cleaned up.
	if attributes then
		track("tag_transcription/attributes")
		attrs[#attrs + 1] = attributes
	end
	return "<" .. concat(attrs, " ") .. ">" .. transcription .. "</" .. tag_name .. ">"
end
--[==[Tags {def} as a definition.
The <code>kind</code> parameter must be one of the following:
; {{code|lua|"gloss"}}
: The text is wrapped in {{code|html|2=<span class="mention-gloss">...</span>}}.
; {{code|lua|"non-gloss"}}
: The text is wrapped in {{code|html|2=<span class="use-with-mention">...</span>}}.
The optional <code>attributes</code> parameter is used to specify additional HTML attributes for the tag.]==]
function export.tag_definition(def, kind, attributes)
	-- Tag/class data for this kind of definition, from the data module.
	local info = (m_data or get_data()).definition[kind]
	if info == nil then
		error("Second argument to tag_definition should specify the kind of definition from the list in [[Module:script utilities/data]].")
	end
	local tag_name = info.tag
	local attrs = {tag_name}
	if info.class then
		attrs[#attrs + 1] = 'class="' .. info.class .. '"'
	end
	-- Raw extra attribute text, appended verbatim.
	if attributes then
		attrs[#attrs + 1] = attributes
	end
	return "<" .. concat(attrs, " ") .. ">" .. def .. "</" .. tag_name .. ">"
end
--[==[Generates a request to provide a term in its native script, if it is missing. This is used by the {{temp|rfscript}} template as well as by the functions in [[Module:links]].
The function will add entries to one of the subcategories of [[:Category:Requests for native script by language]], and do several checks on the given language and script. In particular:
* If the script was given, a subcategory named "Requests for (script) script" is added, but only if the language has more than one script. Otherwise, the main "Requests for native script" category is used.
* Nothing is added at all if the language has no scripts other than Latin and its varieties.]==]
function export.request_script(lang, sc, usex, nocat, sort_key)
local scripts = lang.getScripts and lang:getScripts() or error('The language "' .. lang:getCode() .. '" does not have the method getScripts. It may be unwritten.')
-- By default, request for "native" script
local cat_script = "native"
local disp_script = "script"
-- If the script was not specified, and the language has only one script, use that.
if not sc and #scripts == 1 then
sc = scripts[1]
end
-- Is the script known?
if sc and sc:getCode() ~= "None" then
-- If the script is Latin, return nothing.
if export.is_Latin_script(sc) then
return ""
end
-- Name the script in the displayed request text when it is not the
-- language's primary script.
if (not scripts[1]) or sc:getCode() ~= scripts[1]:getCode() then
disp_script = sc:getCanonicalName()
end
-- The category needs to be specific to script only if there is chance of ambiguity. This occurs when the language has multiple scripts (or with codes such as "und").
if (not scripts[1]) or scripts[2] then
cat_script = sc:getCanonicalName()
end
else
-- The script is not known.
-- Does the language have at least one non-Latin script in its list?
local has_nonlatin = false
for _, val in ipairs(scripts) do
if not export.is_Latin_script(val) then
has_nonlatin = true
break
end
end
-- If there are no non-Latin scripts, return nothing.
if not has_nonlatin and lang:getCode() ~= "und" then
return ""
end
end
-- Etymology languages have their own categories, whose parents are the regular language.
-- Emit the visible request text plus (unless nocat) a request category whose
-- wording depends on whether this is for a term, usage example or quotation.
return "<small>[" .. disp_script .. " needed]</small>" .. (nocat and "" or
format_categories("Requests for " .. cat_script .. " script " ..
(usex and "in" or "for") .. " " .. lang:getCanonicalName() .. " " ..
(usex == "quote" and "quotations" or usex and "usage examples" or "terms"),
lang, sort_key
)
)
end
--[==[This is used by {{temp|rfscript}}. See there for more information.]==]
function export.template_rfscript(frame)
	-- Parse the template's parameters from the parent frame.
	local bool_param = {type = "boolean"}
	local spec = {
		[1] = {required = true, type = "language", default = "und"},
		["sc"] = {type = "script"},
		["usex"] = bool_param,
		["quote"] = bool_param,
		["nocat"] = bool_param,
		["sort"] = true,
	}
	local args = process_params(frame:getParent().args, spec)
	-- quote= takes precedence over usex= when choosing the request wording.
	local usex_kind = args.quote and "quote" or args.usex
	local ret = export.request_script(args[1], args.sc, usex_kind, args.nocat, args.sort)
	if ret == "" then
		error("This language is written in the Latin alphabet. It does not need a native script.")
	end
	return ret
end
--[==[Throws an error if {text} contains any letters that do not belong to the script
with code {scriptCode}. If {result} is a string, it is used as the error message.]==]
function export.checkScript(text, scriptCode, result)
	local script_object = get_script(scriptCode)
	if not script_object then
		error('The script code "' .. scriptCode .. '" is not recognized.')
	end
	-- Strip everything that is not a letter, then strip all letters belonging
	-- to the script; whatever remains is the offending text.
	local leftover = ugsub(text, "%A+", "")
	leftover = ugsub(leftover, "[" .. script_object:getCharacters() .. "]+", "")
	if leftover ~= "" then
		if type(result) == "string" then
			error(result)
		else
			error('The text "' .. text .. '" contains the letters "' .. leftover .. '" that do not belong to the ' .. script_object:getDisplayForm() .. '.', 2)
		end
	end
end
return export
7960wdva9og3gbbozo010obcpp5re6e
মডিউল:scripts/data
828
7204
507798
451688
2026-04-14T07:07:34Z
Redmin
6857
[[en:Module:scripts/data|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507798
Scribunto
text/plain
--[=[
When adding new scripts to this file, please don't forget to add
style definitions for the script in [[MediaWiki:Gadget-LanguagesAndScripts.css]].
]=]
local concat = table.concat
local insert = table.insert
local ipairs = ipairs
local next = next
local remove = table.remove
local select = select
local sort = table.sort
-- Loaded on demand, as it may not be needed (depending on the data).
-- Stub that, on first use, replaces itself with the function exported by
-- [[Module:string/char]] and forwards the call, so the module is required
-- at most once and only when some entry actually needs it.
local function u(...)
u = require("Module:string/char")
return u(...)
end
-- We can't use mw.loadData() on [[Module:languages/chars]] because [[Module:languages/data]] itself is sometimes loaded
-- using mw.loadData(), and calling mw.loadData() on [[Module:languages/chars]] will insert metatables into the
-- character tables, which the second mw.loadData() will choke on.
local m_chars = require("Module:languages/chars")
local c = m_chars.chars
local p = m_chars.puaChars
local cs = m_chars.chars_substitutions
------------------------------------------------------------------------------------
--
-- Helper functions
--
------------------------------------------------------------------------------------
-- Comparator for endpoint pairs {value, +1/-1}: orders primarily by value;
-- on equal values, opens (+1) sort before closes (-1).
local function sort_ranges(a, b)
	if a[1] ~= b[1] then
		return a[1] < b[1]
	end
	return a[2] > b[2]
end
-- Returns the union of two or more range tables (flat lists of alternating
-- inclusive start/end codepoints), merging overlapping and adjacent ranges.
local function union(...)
	-- Opens sort before closes when boundaries coincide.
	local function compare_endpoints(a, b)
		return a[1] < b[1] or a[1] == b[1] and a[2] > b[2]
	end
	-- Flatten every input into a list of {value, +1 (open) / -1 (close)} pairs:
	-- odd positions are range starts, even positions are range ends.
	local endpoints = {}
	for arg_index = 1, select("#", ...) do
		local range_table = select(arg_index, ...)
		for pos, bound in ipairs(range_table) do
			endpoints[#endpoints + 1] = {bound, pos % 2 == 1 and 1 or -1}
		end
	end
	table.sort(endpoints, compare_endpoints)
	-- Sweep across the sorted endpoints, tracking nesting depth.
	local result, depth = {}, 0
	for _, point in ipairs(endpoints) do
		depth = depth + point[2]
		if depth == 0 and point[2] == -1 then -- close of an outermost range
			result[#result + 1] = point[1]
		elseif depth == 1 and point[2] == 1 then -- open of an outermost range
			if result[#result] and point[1] <= result[#result] + 1 then
				-- Adjacent to (or touching) the previous range: merge them by
				-- discarding the previous close.
				result[#result] = nil
			else
				result[#result + 1] = point[1]
			end
		end
	end
	return result
end
-- Adds the `characters` key, which is determined by a script's `ranges` table.
-- The result is a pattern-ready character-set string ("a", "ab" or "a-z" per
-- range); also records the endpoint count in `ranges.n`.
local function process_ranges(sc)
	local ranges = sc.ranges
	local pieces = {}
	for i = 2, #ranges, 2 do
		local lo, hi = ranges[i - 1], ranges[i]
		if lo == hi then
			-- Single-codepoint range.
			pieces[#pieces + 1] = u(hi)
		else
			pieces[#pieces + 1] = u(lo)
			if hi > lo + 1 then
				-- A hyphen is only needed when the range spans 3+ codepoints.
				pieces[#pieces + 1] = "-"
			end
			pieces[#pieces + 1] = u(hi)
		end
	end
	sc.characters = table.concat(pieces)
	ranges.n = #ranges
	return sc
end
-- Precomputes `combiningClassCharacters` (the concatenation of all keys of
-- the `combiningClasses` map, if present) on a script's normalization-fixes
-- table, and returns the table.
local function handle_normalization_fixes(fixes)
	local classes = fixes.combiningClasses
	if classes then
		local chars = {}
		for char in next, classes do
			chars[#chars + 1] = char
		end
		fixes.combiningClassCharacters = table.concat(chars)
	end
	return fixes
end
------------------------------------------------------------------------------------
--
-- Data
--
------------------------------------------------------------------------------------
local m = {}
m["Adlm"] = process_ranges{
"Adlam",
19606346,
"alphabet",
ranges = {
0x061F, 0x061F,
0x0640, 0x0640,
0x1E900, 0x1E94B,
0x1E950, 0x1E959,
0x1E95E, 0x1E95F,
},
capitalized = true,
direction = "rtl",
}
m["Afak"] = {
"Afaka",
382019,
"syllabary",
-- Not in Unicode
}
m["Aghb"] = process_ranges{
"Caucasian Albanian",
2495716,
"alphabet",
ranges = {
0x10530, 0x10563,
0x1056F, 0x1056F,
},
}
m["Ahom"] = process_ranges{
"Ahom",
2839633,
"abugida",
ranges = {
0x11700, 0x1171A,
0x1171D, 0x1172B,
0x11730, 0x11746,
},
}
m["Arab"] = process_ranges{
"Arabic",
1828555,
"abjad", -- more precisely, impure abjad
varieties = {"Jawi", {"Nastaliq", "Nastaleeq"}},
ranges = {
0x0600, 0x06FF,
0x0750, 0x077F,
0x0870, 0x088E,
0x0890, 0x0891,
0x0897, 0x08E1,
0x08E3, 0x08FF,
0xFB50, 0xFBC2,
0xFBD3, 0xFD8F,
0xFD92, 0xFDC7,
0xFDCF, 0xFDCF,
0xFDF0, 0xFDFF,
0xFE70, 0xFE74,
0xFE76, 0xFEFC,
0x102E0, 0x102FB,
0x10E60, 0x10E7E,
0x10EC2, 0x10EC4,
0x10EFC, 0x10EFF,
0x1EE00, 0x1EE03,
0x1EE05, 0x1EE1F,
0x1EE21, 0x1EE22,
0x1EE24, 0x1EE24,
0x1EE27, 0x1EE27,
0x1EE29, 0x1EE32,
0x1EE34, 0x1EE37,
0x1EE39, 0x1EE39,
0x1EE3B, 0x1EE3B,
0x1EE42, 0x1EE42,
0x1EE47, 0x1EE47,
0x1EE49, 0x1EE49,
0x1EE4B, 0x1EE4B,
0x1EE4D, 0x1EE4F,
0x1EE51, 0x1EE52,
0x1EE54, 0x1EE54,
0x1EE57, 0x1EE57,
0x1EE59, 0x1EE59,
0x1EE5B, 0x1EE5B,
0x1EE5D, 0x1EE5D,
0x1EE5F, 0x1EE5F,
0x1EE61, 0x1EE62,
0x1EE64, 0x1EE64,
0x1EE67, 0x1EE6A,
0x1EE6C, 0x1EE72,
0x1EE74, 0x1EE77,
0x1EE79, 0x1EE7C,
0x1EE7E, 0x1EE7E,
0x1EE80, 0x1EE89,
0x1EE8B, 0x1EE9B,
0x1EEA1, 0x1EEA3,
0x1EEA5, 0x1EEA9,
0x1EEAB, 0x1EEBB,
0x1EEF0, 0x1EEF1,
},
direction = "rtl",
normalizationFixes = handle_normalization_fixes{
from = {"ٳ"},
to = {"اٟ"}
},
}
m["fa-Arab"] = {
"Arabic",
744068,
m["Arab"][3],
ranges = m["Arab"].ranges,
characters = m["Arab"].characters,
other_names = {"Perso-Arabic"},
direction = "rtl",
parent = "Arab",
normalizationFixes = m["Arab"].normalizationFixes,
}
m["kk-Arab"] = {
"Arabic",
90681452,
m["Arab"][3],
ranges = m["Arab"].ranges,
characters = m["Arab"].characters,
direction = "rtl",
parent = "Arab",
normalizationFixes = m["Arab"].normalizationFixes,
}
m["ks-Arab"] = m["fa-Arab"]
m["ku-Arab"] = m["fa-Arab"]
m["ms-Arab"] = m["kk-Arab"]
m["mzn-Arab"] = m["fa-Arab"]
m["ota-Arab"] = m["fa-Arab"]
m["pa-Arab"] = {
"Shahmukhi",
133800,
m["Arab"][3],
ranges = m["Arab"].ranges,
characters = m["Arab"].characters,
other_names = {"Arabic"},
direction = "rtl",
parent = "Arab",
normalizationFixes = m["Arab"].normalizationFixes,
}
m["ps-Arab"] = m["fa-Arab"]
m["sd-Arab"] = m["fa-Arab"]
m["tt-Arab"] = m["fa-Arab"]
m["ug-Arab"] = m["fa-Arab"]
m["ur-Arab"] = m["fa-Arab"]
-- Aran (Nastaliq) is subsumed into Arab
m["Armi"] = process_ranges{
"Imperial Aramaic",
26978,
"abjad",
ranges = {
0x10840, 0x10855,
0x10857, 0x1085F,
},
direction = "rtl",
}
m["Armn"] = process_ranges{
"Armenian",
11932,
"alphabet",
ranges = {
0x0531, 0x0556,
0x0559, 0x058A,
0x058D, 0x058F,
0xFB13, 0xFB17,
},
capitalized = true,
translit = "Armn-translit",
}
m["Avst"] = process_ranges{
"Avestan",
790681,
"alphabet",
ranges = {
0x10B00, 0x10B35,
0x10B39, 0x10B3F,
},
direction = "rtl",
}
m["pal-Avst"] = {
"Pazend",
4925073,
m["Avst"][3],
ranges = m["Avst"].ranges,
characters = m["Avst"].characters,
direction = "rtl",
parent = "Avst",
}
m["Bali"] = process_ranges{
"Balinese",
804984,
"abugida",
ranges = {
0x1B00, 0x1B4C,
0x1B4E, 0x1B7F,
},
}
m["Bamu"] = process_ranges{
"Bamum",
806024,
"syllabary",
ranges = {
0xA6A0, 0xA6F7,
0x16800, 0x16A38,
},
}
m["Bass"] = process_ranges{
"Bassa",
810458,
"alphabet",
aliases = {"Bassa Vah", "Vah"},
ranges = {
0x16AD0, 0x16AED,
0x16AF0, 0x16AF5,
},
}
m["Batk"] = process_ranges{
"Batak",
51592,
"abugida",
ranges = {
0x1BC0, 0x1BF3,
0x1BFC, 0x1BFF,
},
}
m["Beng"] = process_ranges{
"Bengali",
756802,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0980, 0x0983,
0x0985, 0x098C,
0x098F, 0x0990,
0x0993, 0x09A8,
0x09AA, 0x09B0,
0x09B2, 0x09B2,
0x09B6, 0x09B9,
0x09BC, 0x09C4,
0x09C7, 0x09C8,
0x09CB, 0x09CE,
0x09D7, 0x09D7,
0x09DC, 0x09DD,
0x09DF, 0x09E3,
0x09E6, 0x09EF,
0x09F2, 0x09FE,
0x1CD0, 0x1CD0,
0x1CD2, 0x1CD2,
0x1CD5, 0x1CD6,
0x1CD8, 0x1CD8,
0x1CE1, 0x1CE1,
0x1CEA, 0x1CEA,
0x1CED, 0x1CED,
0x1CF2, 0x1CF2,
0x1CF5, 0x1CF7,
0xA8F1, 0xA8F1,
},
normalizationFixes = handle_normalization_fixes{
from = {"অা", "ঋৃ", "ঌৢ"},
to = {"আ", "ৠ", "ৡ"}
},
}
m["as-Beng"] = process_ranges{
"Assamese",
191272,
m["Beng"][3],
other_names = {"Eastern Nagari"},
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0980, 0x0983,
0x0985, 0x098C,
0x098F, 0x0990,
0x0993, 0x09A8,
0x09AA, 0x09AF,
0x09B2, 0x09B2,
0x09B6, 0x09B9,
0x09BC, 0x09C4,
0x09C7, 0x09C8,
0x09CB, 0x09CE,
0x09D7, 0x09D7,
0x09DC, 0x09DD,
0x09DF, 0x09E3,
0x09E6, 0x09FE,
0x1CD0, 0x1CD0,
0x1CD2, 0x1CD2,
0x1CD5, 0x1CD6,
0x1CD8, 0x1CD8,
0x1CE1, 0x1CE1,
0x1CEA, 0x1CEA,
0x1CED, 0x1CED,
0x1CF2, 0x1CF2,
0x1CF5, 0x1CF7,
0xA8F1, 0xA8F1,
},
normalizationFixes = m["Beng"].normalizationFixes,
}
m["Bhks"] = process_ranges{
"Bhaiksuki",
17017839,
"abugida",
ranges = {
0x11C00, 0x11C08,
0x11C0A, 0x11C36,
0x11C38, 0x11C45,
0x11C50, 0x11C6C,
},
}
m["Blis"] = {
"Blissymbolic",
609817,
"logography",
aliases = {"Blissymbols"},
-- Not in Unicode
}
m["Bopo"] = process_ranges{
"Zhuyin",
198269,
"semisyllabary",
aliases = {"Zhuyin Fuhao", "Bopomofo"},
ranges = {
0x02EA, 0x02EB,
0x3001, 0x3003,
0x3008, 0x3011,
0x3013, 0x301F,
0x302A, 0x302D,
0x3030, 0x3030,
0x3037, 0x3037,
0x30FB, 0x30FB,
0x3105, 0x312F,
0x31A0, 0x31BF,
0xFE45, 0xFE46,
0xFF61, 0xFF65,
},
}
m["Brah"] = process_ranges{
"Brahmi",
185083,
"abugida",
ranges = {
0x11000, 0x1104D,
0x11052, 0x11075,
0x1107F, 0x1107F,
},
normalizationFixes = handle_normalization_fixes{
from = {"𑀅𑀸", "𑀋𑀾", "𑀏𑁂"},
to = {"𑀆", "𑀌", "𑀐"}
},
translit = "Brah-translit",
}
m["Brai"] = process_ranges{
"Braille",
79894,
"alphabet",
ranges = {
0x2800, 0x28FF,
},
}
m["Bugi"] = process_ranges{
"Lontara",
1074947,
"abugida",
aliases = {"Buginese"},
ranges = {
0x1A00, 0x1A1B,
0x1A1E, 0x1A1F,
0xA9CF, 0xA9CF,
},
}
m["Buhd"] = process_ranges{
"Buhid",
1002969,
"abugida",
ranges = {
0x1735, 0x1736,
0x1740, 0x1751,
0x1752, 0x1753,
},
}
m["Cakm"] = process_ranges{
"Chakma",
1059328,
"abugida",
ranges = {
0x09E6, 0x09EF,
0x1040, 0x1049,
0x11100, 0x11134,
0x11136, 0x11147,
},
}
m["Cans"] = process_ranges{
"Canadian syllabic",
2479183,
"abugida",
ranges = {
0x1400, 0x167F,
0x18B0, 0x18F5,
0x11AB0, 0x11ABF,
},
}
m["Cari"] = process_ranges{
"Carian",
1094567,
"alphabet",
ranges = {
0x102A0, 0x102D0,
},
}
m["Cham"] = process_ranges{
"Cham",
1060381,
"abugida",
ranges = {
0xAA00, 0xAA36,
0xAA40, 0xAA4D,
0xAA50, 0xAA59,
0xAA5C, 0xAA5F,
},
}
m["Cher"] = process_ranges{
"Cherokee",
26549,
"syllabary",
ranges = {
0x13A0, 0x13F5,
0x13F8, 0x13FD,
0xAB70, 0xABBF,
},
}
m["Chis"] = {
"Chisoi",
123173777,
"abugida",
-- Not in Unicode
}
m["Chrs"] = process_ranges{
"Khwarezmian",
72386710,
"abjad",
aliases = {"Chorasmian"},
ranges = {
0x10FB0, 0x10FCB,
},
direction = "rtl",
}
m["Copt"] = process_ranges{
"Coptic",
321083,
"alphabet",
ranges = {
0x03E2, 0x03EF,
0x2C80, 0x2CF3,
0x2CF9, 0x2CFF,
0x102E0, 0x102FB,
},
capitalized = true,
}
m["Cpmn"] = process_ranges{
"Cypro-Minoan",
1751985,
"syllabary",
aliases = {"Cypro Minoan"},
ranges = {
0x10100, 0x10101,
0x12F90, 0x12FF2,
},
}
m["Cprt"] = process_ranges{
"Cypriot",
1757689,
"syllabary",
ranges = {
0x10100, 0x10102,
0x10107, 0x10133,
0x10137, 0x1013F,
0x10800, 0x10805,
0x10808, 0x10808,
0x1080A, 0x10835,
0x10837, 0x10838,
0x1083C, 0x1083C,
0x1083F, 0x1083F,
},
direction = "rtl",
}
m["Cyrl"] = process_ranges{
"Cyrillic",
8209,
"alphabet",
ranges = {
0x0400, 0x052F,
0x1C80, 0x1C8A,
0x1D2B, 0x1D2B,
0x1D78, 0x1D78,
0x1DF8, 0x1DF8,
0x2DE0, 0x2DFF,
0x2E43, 0x2E43,
0xA640, 0xA69F,
0xFE2E, 0xFE2F,
0x1E030, 0x1E06D,
0x1E08F, 0x1E08F,
},
capitalized = true,
}
m["Cyrs"] = {
"Old Cyrillic",
442244,
m["Cyrl"][3],
aliases = {"Early Cyrillic"},
ranges = m["Cyrl"].ranges,
characters = m["Cyrl"].characters,
capitalized = m["Cyrl"].capitalized,
wikipedia_article = "Early Cyrillic alphabet",
normalizationFixes = handle_normalization_fixes{
from = {"Ѹ", "ѹ"},
to = {"Ꙋ", "ꙋ"}
},
strip_diacritics = {remove_diacritics = cs.Cyrs_remove_diacritics},
sort_key = {
remove_diacritics = cs.Cyrs_remove_diacritics,
from = {
"ї", "оу", -- 2 chars
"[ґꙣєѕꙃꙅꙁіꙇђꙉѻꙩꙫꙭꙮꚙꚛꙋѡѿꙍѽꙑѣꙗѥꙕѧꙙѩꙝꙛѫѭѯѱѳѵҁ]"
},
to = {
"и" .. p[1], "у", {
["ґ"] = "г" .. p[1], ["ꙣ"] = "д" .. p[1], ["є"] = "е", ["ѕ"] = "ж" .. p[1], ["ꙃ"] = "ж" .. p[1],
["ꙅ"] = "ж" .. p[1], ["ꙁ"] = "з", ["і"] = "и" .. p[1], ["ꙇ"] = "и" .. p[1], ["ђ"] = "и" .. p[2],
["ꙉ"] = "и" .. p[2], ["ѻ"] = "о", ["ꙩ"] = "о", ["ꙫ"] = "о", ["ꙭ"] = "о",
["ꙮ"] = "о", ["ꚙ"] = "о", ["ꚛ"] = "о", ["ꙋ"] = "у", ["ѡ"] = "х" .. p[1],
["ѿ"] = "х" .. p[1], ["ꙍ"] = "х" .. p[1], ["ѽ"] = "х" .. p[1], ["ꙑ"] = "ы", ["ѣ"] = "ь" .. p[1],
["ꙗ"] = "ь" .. p[2], ["ѥ"] = "ь" .. p[3], ["ꙕ"] = "ю", ["ѧ"] = "я", ["ꙙ"] = "я",
["ѩ"] = "я" .. p[1], ["ꙝ"] = "я" .. p[1], ["ꙛ"] = "я" .. p[2], ["ѫ"] = "я" .. p[3], ["ѭ"] = "я" .. p[4],
["ѯ"] = "я" .. p[5], ["ѱ"] = "я" .. p[6], ["ѳ"] = "я" .. p[7], ["ѵ"] = "я" .. p[8], ["ҁ"] = "я" .. p[9],
}
},
}
}
m["Deva"] = process_ranges{
"Devanagari",
38592,
"abugida",
ranges = {
0x0900, 0x097F,
0x1CD0, 0x1CF6,
0x1CF8, 0x1CF9,
0x20F0, 0x20F0,
0xA830, 0xA839,
0xA8E0, 0xA8FF,
0x11B00, 0x11B09,
},
normalizationFixes = handle_normalization_fixes{
from = {"ॆॆ", "ेे", "ाॅ", "ाॆ", "ाꣿ", "ॊॆ", "ाे", "ाै", "ोे", "ाऺ", "ॖॖ", "अॅ", "अॆ", "अा", "एॅ", "एॆ", "एे", "एꣿ", "ऎॆ", "अॉ", "आॅ", "अॊ", "आॆ", "अो", "आे", "अौ", "आै", "ओे", "अऺ", "अऻ", "आऺ", "अाꣿ", "आꣿ", "ऒॆ", "अॖ", "अॗ", "ॶॖ", "्?ा"},
to = {"ꣿ", "ै", "ॉ", "ॊ", "ॏ", "ॏ", "ो", "ौ", "ौ", "ऻ", "ॗ", "ॲ", "ऄ", "आ", "ऍ", "ऎ", "ऐ", "ꣾ", "ꣾ", "ऑ", "ऑ", "ऒ", "ऒ", "ओ", "ओ", "औ", "औ", "औ", "ॳ", "ॴ", "ॴ", "ॵ", "ॵ", "ॵ", "ॶ", "ॷ", "ॷ"}
},
}
m["Diak"] = process_ranges{
"Dhives Akuru",
3307073,
"abugida",
aliases = {"Dhivehi Akuru", "Dives Akuru", "Divehi Akuru"},
ranges = {
0x11900, 0x11906,
0x11909, 0x11909,
0x1190C, 0x11913,
0x11915, 0x11916,
0x11918, 0x11935,
0x11937, 0x11938,
0x1193B, 0x11946,
0x11950, 0x11959,
},
}
m["Dogr"] = process_ranges{
"Dogra",
72402987,
"abugida",
ranges = {
0x0964, 0x096F,
0xA830, 0xA839,
0x11800, 0x1183B,
},
}
m["Dsrt"] = process_ranges{
"Deseret",
1200582,
"alphabet",
ranges = {
0x10400, 0x1044F,
},
capitalized = true,
}
m["Dupl"] = process_ranges{
"Duployan",
5316025,
"alphabet",
ranges = {
0x1BC00, 0x1BC6A,
0x1BC70, 0x1BC7C,
0x1BC80, 0x1BC88,
0x1BC90, 0x1BC99,
0x1BC9C, 0x1BCA3,
},
}
m["Egyd"] = {
"Demotic",
188519,
"abjad, logography",
-- Not in Unicode
}
m["Egyh"] = {
"Hieratic",
208111,
"abjad, logography",
-- Unified with Egyptian hieroglyphic in Unicode
}
m["Egyp"] = process_ranges{
"Egyptian hieroglyphic",
132659,
"abjad, logography",
ranges = {
0x13000, 0x13455,
0x13460, 0x143FA,
},
varieties = {"Hieratic"},
wikipedia_article = "Egyptian hieroglyphs",
normalizationFixes = handle_normalization_fixes{
from = {"𓃁", "𓆖"},
to = {"𓃀𓂝", "𓆓𓏏𓇿"}
},
}
m["Elba"] = process_ranges{
"Elbasan",
1036714,
"alphabet",
ranges = {
0x10500, 0x10527,
},
}
m["Elym"] = process_ranges{
"Elymaic",
60744423,
"abjad",
ranges = {
0x10FE0, 0x10FF6,
},
direction = "rtl",
}
m["Ethi"] = process_ranges{
"Ethiopic",
257634,
"abugida",
aliases = {"Ge'ez", "Geʽez"},
ranges = {
0x1200, 0x1248,
0x124A, 0x124D,
0x1250, 0x1256,
0x1258, 0x1258,
0x125A, 0x125D,
0x1260, 0x1288,
0x128A, 0x128D,
0x1290, 0x12B0,
0x12B2, 0x12B5,
0x12B8, 0x12BE,
0x12C0, 0x12C0,
0x12C2, 0x12C5,
0x12C8, 0x12D6,
0x12D8, 0x1310,
0x1312, 0x1315,
0x1318, 0x135A,
0x135D, 0x137C,
0x1380, 0x1399,
0x2D80, 0x2D96,
0x2DA0, 0x2DA6,
0x2DA8, 0x2DAE,
0x2DB0, 0x2DB6,
0x2DB8, 0x2DBE,
0x2DC0, 0x2DC6,
0x2DC8, 0x2DCE,
0x2DD0, 0x2DD6,
0x2DD8, 0x2DDE,
0xAB01, 0xAB06,
0xAB09, 0xAB0E,
0xAB11, 0xAB16,
0xAB20, 0xAB26,
0xAB28, 0xAB2E,
0x1E7E0, 0x1E7E6,
0x1E7E8, 0x1E7EB,
0x1E7ED, 0x1E7EE,
0x1E7F0, 0x1E7FE,
},
sort_key = "Ethi-sortkey",
strip_diacritics = {remove_diacritics = u(0x135D) .. u(0x135E) .. u(0x135F)}
}
m["Gara"] = process_ranges{
"Garay",
3095302,
"alphabet",
capitalized = true,
direction = "rtl",
ranges = {
0x060C, 0x060C,
0x061B, 0x061B,
0x061F, 0x061F,
0x10D40, 0x10D65,
0x10D69, 0x10D85,
0x10D8E, 0x10D8F,
},
}
m["Geok"] = process_ranges{
"Khutsuri",
1090055,
"alphabet",
ranges = { -- Ⴀ-Ⴭ is Asomtavruli, ⴀ-ⴭ is Nuskhuri
0x10A0, 0x10C5,
0x10C7, 0x10C7,
0x10CD, 0x10CD,
0x10FB, 0x10FB,
0x2D00, 0x2D25,
0x2D27, 0x2D27,
0x2D2D, 0x2D2D,
},
varieties = {"Nuskhuri", "Asomtavruli"},
capitalized = true,
translit = "Geok-translit",
}
m["Geor"] = process_ranges{
"Georgian",
3317411,
"alphabet",
ranges = { -- ა-ჿ is lowercase Mkhedruli; Ა-Ჿ is uppercase Mkhedruli (Mtavruli)
0x0589, 0x0589,
0x10D0, 0x10FF,
0x1C90, 0x1CBA,
0x1CBD, 0x1CBF,
},
varieties = {"Mkhedruli", "Mtavruli"},
capitalized = true,
translit = "Geor-translit",
}
m["Glag"] = process_ranges{
"Glagolitic",
145625,
"alphabet",
ranges = {
0x0484, 0x0484,
0x0487, 0x0487,
0x0589, 0x0589,
0x10FB, 0x10FB,
0x2C00, 0x2C5F,
0x2E43, 0x2E43,
0xA66F, 0xA66F,
0x1E000, 0x1E006,
0x1E008, 0x1E018,
0x1E01B, 0x1E021,
0x1E023, 0x1E024,
0x1E026, 0x1E02A,
},
capitalized = true,
}
m["Gong"] = process_ranges{
"Gunjala Gondi",
18125340,
"abugida",
ranges = {
0x0964, 0x0965,
0x11D60, 0x11D65,
0x11D67, 0x11D68,
0x11D6A, 0x11D8E,
0x11D90, 0x11D91,
0x11D93, 0x11D98,
0x11DA0, 0x11DA9,
},
}
m["Gonm"] = process_ranges{
"Masaram Gondi",
16977603,
"abugida",
ranges = {
0x0964, 0x0965,
0x11D00, 0x11D06,
0x11D08, 0x11D09,
0x11D0B, 0x11D36,
0x11D3A, 0x11D3A,
0x11D3C, 0x11D3D,
0x11D3F, 0x11D47,
0x11D50, 0x11D59,
},
}
m["Goth"] = process_ranges{
"Gothic",
467784,
"alphabet",
ranges = {
0x10330, 0x1034A,
},
wikipedia_article = "Gothic alphabet",
}
m["Gran"] = process_ranges{
"Grantha",
1119274,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0BE6, 0x0BF3,
0x1CD0, 0x1CD0,
0x1CD2, 0x1CD3,
0x1CF2, 0x1CF4,
0x1CF8, 0x1CF9,
0x20F0, 0x20F0,
0x11300, 0x11303,
0x11305, 0x1130C,
0x1130F, 0x11310,
0x11313, 0x11328,
0x1132A, 0x11330,
0x11332, 0x11333,
0x11335, 0x11339,
0x1133B, 0x11344,
0x11347, 0x11348,
0x1134B, 0x1134D,
0x11350, 0x11350,
0x11357, 0x11357,
0x1135D, 0x11363,
0x11366, 0x1136C,
0x11370, 0x11374,
0x11FD0, 0x11FD1,
0x11FD3, 0x11FD3,
},
}
m["Grek"] = process_ranges{
"Greek",
8216,
"alphabet",
ranges = {
0x0341, 0x0341,
0x0374, 0x0375,
0x037E, 0x037E,
0x0384, 0x038A,
0x038C, 0x038C,
0x038E, 0x03A1,
0x03A3, 0x03D7,
0x03DA, 0x03DB,
0x03DE, 0x03E1,
0x03F0, 0x03F1,
0x03F4, 0x03F4,
0x03FC, 0x03FC,
0x1D26, 0x1D2A,
0x1D5D, 0x1D61,
0x1D66, 0x1D6A,
0x1DBF, 0x1DBF,
0x2126, 0x2127,
0x2129, 0x2129,
0x213C, 0x2140,
0xAB65, 0xAB65,
0x10140, 0x1018E,
0x101A0, 0x101A0,
0x1D200, 0x1D245,
},
capitalized = true,
display_text = cs["Grek-displaytext"],
strip_diacritics = cs["Grek-stripdiacritics"],
sort_key = {
remove_diacritics = "'ʼ;·`¨´῀" .. c.grave .. c.acute .. c.diaer .. c.caron .. c.turnedcommaabove .. c.commaabove .. c.revcommaabove .. c.macron .. c.breve .. c.diaerbelow .. c.brevebelow .. c.perispomeni .. c.ypogegrammeni .. c.RSQuo .. c.prime .. c.keraia .. c.lowerkeraia .. c.tonos .. c.coronis .. c.psili .. c.dasia,
from = {"ϝ", "ͷ", "ϛ", "ͱ", "ͺ", "ϳ", "ϻ", "[ϟϙ]", "[ςϲ]", "ͳ"},
to = {"ε" .. p[1], "ε" .. p[2], "ε" .. p[3], "ζ" .. p[1], "ι", "ι" .. p[1], "π" .. p[1], "π" .. p[2], "σ", "ϡ"},
},
}
m["Polyt"] = process_ranges{
"Greek",
1475332,
m["Grek"][3],
ranges = union(m["Grek"].ranges, {
0x0340, 0x0340,
0x0342, 0x0345,
0x0370, 0x0373,
0x0376, 0x0377,
0x037A, 0x037D,
0x037F, 0x037F,
0x03D8, 0x03D9,
0x03DC, 0x03DD,
0x03F2, 0x03F3,
0x03F5, 0x03FB,
0x03FD, 0x03FF,
0x1F00, 0x1F15,
0x1F18, 0x1F1D,
0x1F20, 0x1F45,
0x1F48, 0x1F4D,
0x1F50, 0x1F57,
0x1F59, 0x1F59,
0x1F5B, 0x1F5B,
0x1F5D, 0x1F5D,
0x1F5F, 0x1F7D,
0x1F80, 0x1FB4,
0x1FB6, 0x1FC4,
0x1FC6, 0x1FD3,
0x1FD6, 0x1FDB,
0x1FDD, 0x1FEF,
0x1FF2, 0x1FF4,
0x1FF6, 0x1FFE,
}),
ietf_subtag = "Grek",
capitalized = m["Grek"].capitalized,
parent = "Grek",
display_text = m["Grek"].display_text,
strip_diacritics = "Polyt-stripdiacritics",
sort_key = m["Grek"].sort_key,
translit = "grc-translit",
}
m["Gujr"] = process_ranges{
"Gujarati",
733944,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0A81, 0x0A83,
0x0A85, 0x0A8D,
0x0A8F, 0x0A91,
0x0A93, 0x0AA8,
0x0AAA, 0x0AB0,
0x0AB2, 0x0AB3,
0x0AB5, 0x0AB9,
0x0ABC, 0x0AC5,
0x0AC7, 0x0AC9,
0x0ACB, 0x0ACD,
0x0AD0, 0x0AD0,
0x0AE0, 0x0AE3,
0x0AE6, 0x0AF1,
0x0AF9, 0x0AFF,
0xA830, 0xA839,
},
normalizationFixes = handle_normalization_fixes{
from = {"ઓ", "અાૈ", "અા", "અૅ", "અે", "અૈ", "અૉ", "અો", "અૌ", "આૅ", "આૈ", "ૅા"},
to = {"અાૅ", "ઔ", "આ", "ઍ", "એ", "ઐ", "ઑ", "ઓ", "ઔ", "ઓ", "ઔ", "ૉ"}
},
}
m["Gukh"] = process_ranges{
"Khema",
110064239,
"abugida",
aliases = {"Gurung Khema", "Khema Phri", "Khema Lipi"},
ranges = {
0x0965, 0x0965,
0x16100, 0x16139,
},
}
m["Guru"] = process_ranges{
"Gurmukhi",
689894,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0A01, 0x0A03,
0x0A05, 0x0A0A,
0x0A0F, 0x0A10,
0x0A13, 0x0A28,
0x0A2A, 0x0A30,
0x0A32, 0x0A33,
0x0A35, 0x0A36,
0x0A38, 0x0A39,
0x0A3C, 0x0A3C,
0x0A3E, 0x0A42,
0x0A47, 0x0A48,
0x0A4B, 0x0A4D,
0x0A51, 0x0A51,
0x0A59, 0x0A5C,
0x0A5E, 0x0A5E,
0x0A66, 0x0A76,
0xA830, 0xA839,
},
normalizationFixes = handle_normalization_fixes{
from = {"ਅਾ", "ਅੈ", "ਅੌ", "ੲਿ", "ੲੀ", "ੲੇ", "ੳੁ", "ੳੂ", "ੳੋ"},
to = {"ਆ", "ਐ", "ਔ", "ਇ", "ਈ", "ਏ", "ਉ", "ਊ", "ਓ"}
},
}
m["Hang"] = process_ranges{
"Hangul",
8222,
"syllabary",
aliases = {"Hangeul"},
ranges = {
0x1100, 0x11FF,
0x3001, 0x3003,
0x3008, 0x3011,
0x3013, 0x301F,
0x302E, 0x3030,
0x3037, 0x3037,
0x30FB, 0x30FB,
0x3131, 0x318E,
0x3200, 0x321E,
0x3260, 0x327E,
0xA960, 0xA97C,
0xAC00, 0xD7A3,
0xD7B0, 0xD7C6,
0xD7CB, 0xD7FB,
0xFE45, 0xFE46,
0xFF61, 0xFF65,
0xFFA0, 0xFFBE,
0xFFC2, 0xFFC7,
0xFFCA, 0xFFCF,
0xFFD2, 0xFFD7,
0xFFDA, 0xFFDC,
},
}
m["Hani"] = process_ranges{
"Han",
8201,
"logography",
ranges = {
0x2E80, 0x2E99,
0x2E9B, 0x2EF3,
0x2F00, 0x2FD5,
0x2FF0, 0x2FFF,
0x3001, 0x3003,
0x3005, 0x3011,
0x3013, 0x301F,
0x3021, 0x302D,
0x3030, 0x3030,
0x3037, 0x303F,
0x3190, 0x319F,
0x31C0, 0x31E5,
0x31EF, 0x31EF,
0x3220, 0x3247,
0x3280, 0x32B0,
0x32C0, 0x32CB,
0x30FB, 0x30FB,
0x32FF, 0x32FF,
0x3358, 0x3370,
0x337B, 0x337F,
0x33E0, 0x33FE,
0x3400, 0x4DBF,
0x4E00, 0x9FFF,
0xA700, 0xA707,
0xF900, 0xFA6D,
0xFA70, 0xFAD9,
0xFE45, 0xFE46,
0xFF61, 0xFF65,
0x16FE2, 0x16FE3,
0x16FF0, 0x16FF1,
0x1D360, 0x1D371,
0x1F250, 0x1F251,
0x20000, 0x2A6DF,
0x2A700, 0x2B739,
0x2B740, 0x2B81D,
0x2B820, 0x2CEA1,
0x2CEB0, 0x2EBE0,
0x2EBF0, 0x2EE5D,
0x2F800, 0x2FA1D,
0x30000, 0x3134A,
0x31350, 0x323AF,
},
varieties = {"Hanzi", "Kanji", "Hanja", "Chu Nom"},
spaces = false,
}
m["Hans"] = {
"Simplified Han",
185614,
m["Hani"][3],
ranges = m["Hani"].ranges,
characters = m["Hani"].characters,
spaces = m["Hani"].spaces,
parent = "Hani",
}
m["Hant"] = {
"Traditional Han",
178528,
m["Hani"][3],
ranges = m["Hani"].ranges,
characters = m["Hani"].characters,
spaces = m["Hani"].spaces,
parent = "Hani",
}
m["Hano"] = process_ranges{
"Hanunoo",
1584045,
"abugida",
aliases = {"Hanunó'o", "Hanuno'o"},
ranges = {
0x1720, 0x1736,
},
}
m["Hatr"] = process_ranges{
"Hatran",
20813038,
"abjad",
ranges = {
0x108E0, 0x108F2,
0x108F4, 0x108F5,
0x108FB, 0x108FF,
},
direction = "rtl",
}
m["Hebr"] = process_ranges{
"Hebrew",
33513,
"abjad", -- more precisely, impure abjad
ranges = {
0x0591, 0x05C7,
0x05D0, 0x05EA,
0x05EF, 0x05F4,
0x2135, 0x2138,
0xFB1D, 0xFB36,
0xFB38, 0xFB3C,
0xFB3E, 0xFB3E,
0xFB40, 0xFB41,
0xFB43, 0xFB44,
0xFB46, 0xFB4F,
},
direction = "rtl",
display_text = "Hebr-common",
sort_key = "Hebr-common",
strip_diacritics = "Hebr-common",
}
m["Hira"] = process_ranges{
"Hiragana",
48332,
"syllabary",
ranges = {
0x3001, 0x3003,
0x3008, 0x3011,
0x3013, 0x301F,
0x3030, 0x3035,
0x3037, 0x3037,
0x303C, 0x303D,
0x3041, 0x3096,
0x3099, 0x30A0,
0x30FB, 0x30FC,
0xFE45, 0xFE46,
0xFF61, 0xFF65,
0xFF70, 0xFF70,
0xFF9E, 0xFF9F,
0x1B001, 0x1B11F,
0x1B132, 0x1B132,
0x1B150, 0x1B152,
0x1F200, 0x1F200,
},
varieties = {"Hentaigana"},
spaces = false,
}
m["Hluw"] = process_ranges{
"Anatolian hieroglyphic",
521323,
"logography, syllabary",
ranges = {
0x14400, 0x14646,
},
wikipedia_article = "Anatolian hieroglyphs",
}
m["Hmng"] = process_ranges{
"Pahawh Hmong",
365954,
"semisyllabary",
aliases = {"Hmong"},
ranges = {
0x16B00, 0x16B45,
0x16B50, 0x16B59,
0x16B5B, 0x16B61,
0x16B63, 0x16B77,
0x16B7D, 0x16B8F,
},
}
m["Hmnp"] = process_ranges{
"Nyiakeng Puachue Hmong",
33712499,
"alphabet",
ranges = {
0x1E100, 0x1E12C,
0x1E130, 0x1E13D,
0x1E140, 0x1E149,
0x1E14E, 0x1E14F,
},
}
m["Hung"] = process_ranges{
"Old Hungarian",
446224,
"alphabet",
aliases = {"Hungarian runic"},
ranges = {
0x10C80, 0x10CB2,
0x10CC0, 0x10CF2,
0x10CFA, 0x10CFF,
},
capitalized = true,
direction = "rtl",
}
m["Ibrnn"] = {
"Northeastern Iberian",
1113155,
"semisyllabary",
ietf_subtag = "Zzzz",
-- Not in Unicode
}
m["Ibrns"] = {
"Southeastern Iberian",
2305351,
"semisyllabary",
ietf_subtag = "Zzzz",
-- Not in Unicode
}
m["Image"] = {
-- To be used to avoid any formatting or link processing
"Image-rendered",
478798,
-- This should not have any characters listed
ietf_subtag = "Zyyy",
translit = false,
character_category = false, -- none
}
m["Inds"] = {
"Indus",
601388,
aliases = {"Harappan", "Indus Valley"},
}
m["Ipach"] = {
"International Phonetic Alphabet",
21204,
aliases = {"IPA"},
ietf_subtag = "Latn",
}
m["Ital"] = process_ranges{
"Old Italic",
4891256,
"alphabet",
ranges = {
0x10300, 0x10323,
0x1032D, 0x1032F,
},
translit = "Ital-translit",
}
m["Java"] = process_ranges{
"Javanese",
879704,
"abugida",
ranges = {
0xA980, 0xA9CD,
0xA9CF, 0xA9D9,
0xA9DE, 0xA9DF,
},
}
m["Jurc"] = {
"Jurchen",
912240,
"logography",
spaces = false,
}
m["Kali"] = process_ranges{
"Kayah Li",
4919239,
"abugida",
ranges = {
0xA900, 0xA92F,
},
}
m["Kana"] = process_ranges{
"Katakana",
82946,
"syllabary",
ranges = {
0x3001, 0x3003,
0x3008, 0x3011,
0x3013, 0x301F,
0x3030, 0x3035,
0x3037, 0x3037,
0x303C, 0x303D,
0x3099, 0x309C,
0x30A0, 0x30FF,
0x31F0, 0x31FF,
0x32D0, 0x32FE,
0x3300, 0x3357,
0xFE45, 0xFE46,
0xFF61, 0xFF9F,
0x1AFF0, 0x1AFF3,
0x1AFF5, 0x1AFFB,
0x1AFFD, 0x1AFFE,
0x1B000, 0x1B000,
0x1B120, 0x1B122,
0x1B155, 0x1B155,
0x1B164, 0x1B167,
},
spaces = false,
}
m["Kawi"] = process_ranges{
"Kawi",
975802,
"abugida",
ranges = {
0x11F00, 0x11F10,
0x11F12, 0x11F3A,
0x11F3E, 0x11F5A,
},
}
m["Khar"] = process_ranges{
"Kharoshthi",
1161266,
"abugida",
ranges = {
0x10A00, 0x10A03,
0x10A05, 0x10A06,
0x10A0C, 0x10A13,
0x10A15, 0x10A17,
0x10A19, 0x10A35,
0x10A38, 0x10A3A,
0x10A3F, 0x10A48,
0x10A50, 0x10A58,
},
direction = "rtl",
}
m["Khmr"] = process_ranges{
"Khmer",
1054190,
"abugida",
ranges = {
0x1780, 0x17DD,
0x17E0, 0x17E9,
0x17F0, 0x17F9,
0x19E0, 0x19FF,
},
spaces = false,
normalizationFixes = handle_normalization_fixes{
from = {"ឣ", "ឤ"},
to = {"អ", "អា"}
},
}
m["Khoj"] = process_ranges{
"Khojki",
1740656,
"abugida",
ranges = {
0x0AE6, 0x0AEF,
0xA830, 0xA839,
0x11200, 0x11211,
0x11213, 0x11241,
},
normalizationFixes = handle_normalization_fixes{
from = {"𑈀𑈬𑈱", "𑈀𑈬", "𑈀𑈱", "𑈀𑈳", "𑈁𑈱", "𑈆𑈬", "𑈬𑈰", "𑈬𑈱", "𑉀𑈮"},
to = {"𑈇", "𑈁", "𑈅", "𑈇", "𑈇", "𑈃", "𑈲", "𑈳", "𑈂"}
},
}
m["Khomt"] = {
"Khom Thai",
13023788,
"abugida",
-- Not in Unicode
}
m["Kitl"] = {
"Khitan large",
6401797,
"logography",
spaces = false,
}
m["Kits"] = process_ranges{
"Khitan small",
6401800,
"logography, syllabary",
ranges = {
0x16FE4, 0x16FE4,
0x18B00, 0x18CD5,
0x18CFF, 0x18CFF,
},
spaces = false,
}
m["Knda"] = process_ranges{
"Kannada",
839666,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0C80, 0x0C8C,
0x0C8E, 0x0C90,
0x0C92, 0x0CA8,
0x0CAA, 0x0CB3,
0x0CB5, 0x0CB9,
0x0CBC, 0x0CC4,
0x0CC6, 0x0CC8,
0x0CCA, 0x0CCD,
0x0CD5, 0x0CD6,
0x0CDD, 0x0CDE,
0x0CE0, 0x0CE3,
0x0CE6, 0x0CEF,
0x0CF1, 0x0CF3,
0x1CD0, 0x1CD0,
0x1CD2, 0x1CD3,
0x1CDA, 0x1CDA,
0x1CF2, 0x1CF2,
0x1CF4, 0x1CF4,
0xA830, 0xA835,
},
normalizationFixes = handle_normalization_fixes{
from = {"ಉಾ", "ಋಾ", "ಒೌ"},
to = {"ಊ", "ೠ", "ಔ"}
},
translit = "kn-translit",
}
m["Kpel"] = {
"Kpelle",
1586299,
"syllabary",
-- Not in Unicode
}
m["Krai"] = process_ranges{
"Kirat Rai",
123173834,
"abugida",
aliases = {"Rai", "Khambu Rai", "Rai Barṇamālā", "Kirat Khambu Rai"},
ranges = {
0x16D40, 0x16D79,
},
}
m["Kthi"] = process_ranges{
"Kaithi",
1253814,
"abugida",
ranges = {
0x0966, 0x096F,
0xA830, 0xA839,
0x11080, 0x110C2,
0x110CD, 0x110CD,
},
}
m["Kulit"] = {
"Kulitan",
6443044,
"abugida",
-- Not in Unicode
}
m["Lana"] = process_ranges{
"Tai Tham",
1314503,
"abugida",
aliases = {"Tham", "Tua Mueang", "Lanna"},
ranges = {
0x1A20, 0x1A5E,
0x1A60, 0x1A7C,
0x1A7F, 0x1A89,
0x1A90, 0x1A99,
0x1AA0, 0x1AAD,
},
spaces = false,
}
m["Laoo"] = process_ranges{
"Lao",
1815229,
"abugida",
ranges = {
0x0E81, 0x0E82,
0x0E84, 0x0E84,
0x0E86, 0x0E8A,
0x0E8C, 0x0EA3,
0x0EA5, 0x0EA5,
0x0EA7, 0x0EBD,
0x0EC0, 0x0EC4,
0x0EC6, 0x0EC6,
0x0EC8, 0x0ECE,
0x0ED0, 0x0ED9,
0x0EDC, 0x0EDF,
},
spaces = false,
}
m["Latn"] = process_ranges{
"Latin",
8229,
"alphabet",
aliases = {"Roman"},
ranges = {
0x0041, 0x005A,
0x0061, 0x007A,
0x00AA, 0x00AA,
0x00BA, 0x00BA,
0x00C0, 0x00D6,
0x00D8, 0x00F6,
0x00F8, 0x02B8,
0x02C0, 0x02C1,
0x02E0, 0x02E4,
0x0363, 0x036F,
0x0485, 0x0486,
0x0951, 0x0952,
0x10FB, 0x10FB,
0x1D00, 0x1D25,
0x1D2C, 0x1D5C,
0x1D62, 0x1D65,
0x1D6B, 0x1D77,
0x1D79, 0x1DBE,
0x1DF8, 0x1DF8,
0x1E00, 0x1EFF,
0x202F, 0x202F,
0x2071, 0x2071,
0x207F, 0x207F,
0x2090, 0x209C,
0x20F0, 0x20F0,
0x2100, 0x2125,
0x2128, 0x2128,
0x212A, 0x2134,
0x2139, 0x213B,
0x2141, 0x214E,
0x2160, 0x2188,
0x2C60, 0x2C7F,
0xA700, 0xA707,
0xA722, 0xA787,
0xA78B, 0xA7CD,
0xA7D0, 0xA7D1,
0xA7D3, 0xA7D3,
0xA7D5, 0xA7DC,
0xA7F2, 0xA7FF,
0xA92E, 0xA92E,
0xAB30, 0xAB5A,
0xAB5C, 0xAB64,
0xAB66, 0xAB69,
0xFB00, 0xFB06,
0xFF21, 0xFF3A,
0xFF41, 0xFF5A,
0x10780, 0x10785,
0x10787, 0x107B0,
0x107B2, 0x107BA,
0x1DF00, 0x1DF1E,
0x1DF25, 0x1DF2A,
},
varieties = {"Rumi", "Romaji", "Rōmaji", "Romaja"},
capitalized = true,
translit = false,
}
m["Latf"] = {
"Fraktur",
148443,
m["Latn"][3],
ranges = m["Latn"].ranges,
characters = m["Latn"].characters,
other_names = {"Blackletter"}, -- Blackletter is actually the parent "script"
capitalized = m["Latn"].capitalized,
translit = m["Latn"].translit,
parent = "Latn",
}
m["Latg"] = {
"Gaelic",
1432616,
m["Latn"][3],
ranges = m["Latn"].ranges,
characters = m["Latn"].characters,
other_names = {"Irish"},
capitalized = m["Latn"].capitalized,
translit = m["Latn"].translit,
parent = "Latn",
}
m["pjt-Latn"] = {
"Latin",
nil,
m["Latn"][3],
ranges = m["Latn"].ranges,
characters = m["Latn"].characters,
capitalized = m["Latn"].capitalized,
translit = m["Latn"].translit,
parent = "Latn",
}
m["Leke"] = {
"Leke",
19572613,
"abugida",
-- Not in Unicode
}
m["Lepc"] = process_ranges{
"Lepcha",
1481626,
"abugida",
aliases = {"Róng"},
ranges = {
0x1C00, 0x1C37,
0x1C3B, 0x1C49,
0x1C4D, 0x1C4F,
},
}
m["Limb"] = process_ranges{
"Limbu",
933796,
"abugida",
ranges = {
0x0965, 0x0965,
0x1900, 0x191E,
0x1920, 0x192B,
0x1930, 0x193B,
0x1940, 0x1940,
0x1944, 0x194F,
},
}
m["Lina"] = process_ranges{
"Linear A",
30972,
ranges = {
0x10107, 0x10133,
0x10600, 0x10736,
0x10740, 0x10755,
0x10760, 0x10767,
},
}
m["Linb"] = process_ranges{
"Linear B",
190102,
ranges = {
0x10000, 0x1000B,
0x1000D, 0x10026,
0x10028, 0x1003A,
0x1003C, 0x1003D,
0x1003F, 0x1004D,
0x10050, 0x1005D,
0x10080, 0x100FA,
0x10100, 0x10102,
0x10107, 0x10133,
0x10137, 0x1013F,
},
}
m["Lisu"] = process_ranges{
"Fraser",
1194621,
"alphabet",
aliases = {"Old Lisu", "Lisu"},
ranges = {
0x300A, 0x300B,
0xA4D0, 0xA4FF,
0x11FB0, 0x11FB0,
},
normalizationFixes = handle_normalization_fixes{
from = {"['’]", "[.ꓸ][.ꓸ]", "[.ꓸ][,ꓹ]"},
to = {"ʼ", "ꓺ", "ꓻ"}
},
translit = "Lisu-translit",
sort_key = {
from = {"𑾰"},
to = {"ꓬ" .. p[1]}
},
}
m["Loma"] = {
"Loma",
13023816,
"syllabary",
-- Not in Unicode
}
m["Lyci"] = process_ranges{
"Lycian",
913587,
"alphabet",
ranges = {
0x10280, 0x1029C,
},
}
m["Lydi"] = process_ranges{
"Lydian",
4261300,
"alphabet",
ranges = {
0x10920, 0x10939,
0x1093F, 0x1093F,
},
direction = "rtl",
}
m["Mahj"] = process_ranges{
"Mahajani",
6732850,
"abugida",
ranges = {
0x0964, 0x096F,
0xA830, 0xA839,
0x11150, 0x11176,
},
}
m["Maka"] = process_ranges{
"Makasar",
72947229,
"abugida",
aliases = {"Old Makasar"},
ranges = {
0x11EE0, 0x11EF8,
},
}
m["Mand"] = process_ranges{
"Mandaic",
1812130,
aliases = {"Mandaean"},
ranges = {
0x0640, 0x0640,
0x0840, 0x085B,
0x085E, 0x085E,
},
direction = "rtl",
}
m["Mani"] = process_ranges{
"Manichaean",
3544702,
"abjad",
ranges = {
0x0640, 0x0640,
0x10AC0, 0x10AE6,
0x10AEB, 0x10AF6,
},
direction = "rtl",
translit = "Mani-translit",
}
m["Marc"] = process_ranges{
"Marchen",
72403709,
"abugida",
ranges = {
0x11C70, 0x11C8F,
0x11C92, 0x11CA7,
0x11CA9, 0x11CB6,
},
}
m["Maya"] = process_ranges{
"Maya",
211248,
aliases = {"Maya hieroglyphic", "Mayan", "Mayan hieroglyphic"},
ranges = {
0x1D2E0, 0x1D2F3,
},
}
m["Medf"] = process_ranges{
"Medefaidrin",
1519764,
aliases = {"Oberi Okaime", "Oberi Ɔkaimɛ"},
ranges = {
0x16E40, 0x16E9A,
},
capitalized = true,
}
m["Mend"] = process_ranges{
"Mende",
951069,
aliases = {"Mende Kikakui"},
ranges = {
0x1E800, 0x1E8C4,
0x1E8C7, 0x1E8D6,
},
direction = "rtl",
}
m["Merc"] = process_ranges{
"Meroitic cursive",
73028124,
"abugida",
ranges = {
0x109A0, 0x109B7,
0x109BC, 0x109CF,
0x109D2, 0x109FF,
},
direction = "rtl",
}
m["Mero"] = process_ranges{
"Meroitic hieroglyphic",
73028623,
"abugida",
ranges = {
0x10980, 0x1099F,
},
direction = "rtl",
wikipedia_article = "Meroitic hieroglyphs",
}
m["Mlym"] = process_ranges{
"Malayalam",
1164129,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0D00, 0x0D0C,
0x0D0E, 0x0D10,
0x0D12, 0x0D44,
0x0D46, 0x0D48,
0x0D4A, 0x0D4F,
0x0D54, 0x0D63,
0x0D66, 0x0D7F,
0x1CDA, 0x1CDA,
0x1CF2, 0x1CF2,
0xA830, 0xA832,
},
normalizationFixes = handle_normalization_fixes{
from = {"ഇൗ", "ഉൗ", "എെ", "ഒാ", "ഒൗ", "ക്", "ണ്", "ന്റ", "ന്", "മ്", "യ്", "ര്", "ല്", "ള്", "ഴ്", "െെ", "ൻ്റ"},
to = {"ഈ", "ഊ", "ഐ", "ഓ", "ഔ", "ൿ", "ൺ", "ൻറ", "ൻ", "ൔ", "ൕ", "ർ", "ൽ", "ൾ", "ൖ", "ൈ", "ന്റ"}
},
translit = "ml-translit",
}
m["Modi"] = process_ranges{
"Modi",
1703713,
"abugida",
ranges = {
0xA830, 0xA839,
0x11600, 0x11644,
0x11650, 0x11659,
},
normalizationFixes = handle_normalization_fixes{
from = {"𑘀𑘹", "𑘀𑘺", "𑘁𑘹", "𑘁𑘺"},
to = {"𑘊", "𑘋", "𑘌", "𑘍"}
},
}
do
-- The do...end block scopes Mong_displaytext, a substitution table shared by
-- the display_text and strip_diacritics properties of m["Mong"] below.
-- The patterns rewrite certain medial letter forms between consonants;
-- NOTE(review): exact orthographic intent of these rewrites is not evident
-- from this file -- see the Mong display-text handling for details.
local Mong_displaytext = {
from = {"([ᠨ-ᡂᡸ])ᠶ([ᠨ-ᡂᡸ])", "([ᠠ-ᡂᡸ])ᠸ([^᠋ᠠ-ᠧ])", "([ᠠ-ᡂᡸ])ᠸ$"},
to = {"%1ᠢ%2", "%1ᠧ%2", "%1ᠧ"}
}
-- Mongolian script (Hudum): written in vertical columns, left to right.
m["Mong"] = process_ranges{
"Mongolian",
1055705, -- Wikidata item ID
"alphabet",
aliases = {"Mongol bichig", "Hudum Mongol bichig"},
ranges = {
0x1800, 0x1805,
0x180A, 0x1819,
0x1820, 0x1842,
0x1878, 0x1878,
0x1880, 0x1897,
0x18A6, 0x18A6,
0x18A9, 0x18A9,
0x200C, 0x200D, -- ZWNJ/ZWJ
0x202F, 0x202F, -- narrow no-break space
0x3001, 0x3002,
0x3008, 0x300B,
0x11660, 0x11668,
},
direction = "vertical-ltr",
display_text = Mong_displaytext,
strip_diacritics = Mong_displaytext,
translit = "Mong-translit",
}
-- Manchu: genuinely distinct script sharing Mongolian codepoints, hence its
-- own narrower range list rather than property inheritance from Mong.
m["mnc-Mong"] = process_ranges{
"Manchu",
122888, -- Wikidata item ID
m["Mong"][3], -- "alphabet"
ranges = {
0x1801, 0x1801,
0x1804, 0x1804,
0x1808, 0x180F,
0x1820, 0x1820,
0x1823, 0x1823,
0x1828, 0x182A,
0x182E, 0x1830,
0x1834, 0x1838,
0x183A, 0x183A,
0x185D, 0x185D,
0x185F, 0x1861,
0x1864, 0x1869,
0x186C, 0x1871,
0x1873, 0x1877,
0x1880, 0x1888,
0x188F, 0x188F,
0x189A, 0x18A5,
0x18A8, 0x18A8,
0x18AA, 0x18AA,
0x200C, 0x200D,
0x202F, 0x202F,
},
direction = "vertical-ltr",
parent = "Mong",
translit = "mnc-translit",
}
-- Xibe: derived from Manchu (note parent = "mnc-Mong", not "Mong").
m["sjo-Mong"] = process_ranges{
"Xibe",
113624153, -- Wikidata item ID
m["Mong"][3],
aliases = {"Sibe"},
ranges = {
0x1804, 0x1804,
0x1807, 0x1807,
0x180A, 0x180F,
0x1820, 0x1820,
0x1823, 0x1823,
0x1828, 0x1828,
0x182A, 0x182A,
0x182E, 0x1830,
0x1834, 0x1838,
0x183A, 0x183A,
0x185D, 0x1872,
0x200C, 0x200D,
0x202F, 0x202F,
},
direction = "vertical-ltr",
parent = "mnc-Mong",
}
-- Clear Script (Todo): Oirat adaptation of the Mongolian script.
m["xwo-Mong"] = process_ranges{
"Clear Script",
529085, -- Wikidata item ID
m["Mong"][3],
aliases = {"Todo", "Todo bichig"},
ranges = {
0x1800, 0x1801,
0x1804, 0x1806,
0x180A, 0x1820,
0x1828, 0x1828,
0x182F, 0x1831,
0x1834, 0x1834,
0x1837, 0x1838,
0x183A, 0x183B,
0x1840, 0x1840,
0x1843, 0x185C,
0x1880, 0x1887,
0x1889, 0x188F,
0x1894, 0x1894,
0x1896, 0x1899,
0x18A7, 0x18A7,
0x200C, 0x200D,
0x202F, 0x202F,
0x11669, 0x1166C,
},
direction = "vertical-ltr",
parent = "Mong",
translit = "xwo-translit",
}
end
m["Moon"] = {
"Moon",
918391,
"alphabet",
aliases = {"Moon System of Embossed Reading", "Moon type", "Moon writing", "Moon alphabet", "Moon code"},
-- Not in Unicode
}
m["Morse"] = {
"Morse code",
79897,
ietf_subtag = "Zsym",
}
m["Mroo"] = process_ranges{
"Mru",
75919253,
aliases = {"Mro", "Mrung"},
ranges = {
0x16A40, 0x16A5E,
0x16A60, 0x16A69,
0x16A6E, 0x16A6F,
},
}
m["Mtei"] = process_ranges{
"Meitei Mayek",
2981413,
"abugida",
aliases = {"Meetei Mayek", "Manipuri"},
ranges = {
0xAAE0, 0xAAF6,
0xABC0, 0xABED,
0xABF0, 0xABF9,
},
}
m["Mult"] = process_ranges{
"Multani",
17047906,
"abugida",
ranges = {
0x0A66, 0x0A6F,
0x11280, 0x11286,
0x11288, 0x11288,
0x1128A, 0x1128D,
0x1128F, 0x1129D,
0x1129F, 0x112A9,
},
}
m["Music"] = process_ranges{
"musical notation",
233861,
"pictography",
ranges = {
0x2669, 0x266F,
0x1D100, 0x1D126,
0x1D129, 0x1D1EA,
},
ietf_subtag = "Zsym",
translit = false,
}
m["Mymr"] = process_ranges{
"Burmese",
43887939,
"abugida",
aliases = {"Myanmar"},
ranges = {
0x1000, 0x109F,
0xA92E, 0xA92E,
0xA9E0, 0xA9FE,
0xAA60, 0xAA7F,
0x116D0, 0x116E3,
},
spaces = false,
}
m["Nagm"] = process_ranges{
"Mundari Bani",
106917274,
"alphabet",
aliases = {"Nag Mundari"},
ranges = {
0x1E4D0, 0x1E4F9,
},
}
m["Nand"] = process_ranges{
"Nandinagari",
6963324,
"abugida",
ranges = {
0x0964, 0x0965,
0x0CE6, 0x0CEF,
0x1CE9, 0x1CE9,
0x1CF2, 0x1CF2,
0x1CFA, 0x1CFA,
0xA830, 0xA835,
0x119A0, 0x119A7,
0x119AA, 0x119D7,
0x119DA, 0x119E4,
},
}
m["Narb"] = process_ranges{
"Ancient North Arabian",
1472213,
"abjad",
aliases = {"Old North Arabian"},
ranges = {
0x10A80, 0x10A9F,
},
direction = "rtl",
translit = "Narb-translit",
}
m["Nbat"] = process_ranges{
"Nabataean",
855624,
"abjad",
aliases = {"Nabatean"},
ranges = {
0x10880, 0x1089E,
0x108A7, 0x108AF,
},
direction = "rtl",
}
m["Newa"] = process_ranges{
"Newa",
7237292,
"abugida",
aliases = {"Newar", "Newari", "Prachalit Nepal"},
ranges = {
0x11400, 0x1145B,
0x1145D, 0x11461,
},
}
m["Nkdb"] = {
"Dongba",
1190953,
"pictography",
aliases = {"Naxi Dongba", "Nakhi Dongba", "Tomba", "Tompa", "Mo-so"},
spaces = false,
-- Not in Unicode
}
m["Nkgb"] = {
"Geba",
731189,
"syllabary",
aliases = {"Nakhi Geba", "Naxi Geba"},
spaces = false,
-- Not in Unicode
}
m["Nkoo"] = process_ranges{
"N'Ko",
1062587,
"alphabet",
ranges = {
0x060C, 0x060C,
0x061B, 0x061B,
0x061F, 0x061F,
0x07C0, 0x07FA,
0x07FD, 0x07FF,
0xFD3E, 0xFD3F,
},
direction = "rtl",
}
m["None"] = {
"unspecified",
nil,
-- This should not have any characters listed
ietf_subtag = "Zyyy",
translit = false,
character_category = false, -- none
}
m["Nshu"] = process_ranges{
"Nüshu",
56436,
"syllabary",
aliases = {"Nushu"},
ranges = {
0x16FE1, 0x16FE1,
0x1B170, 0x1B2FB,
},
spaces = false,
}
m["Ogam"] = process_ranges{
"Ogham",
184661,
ranges = {
0x1680, 0x169C,
},
}
m["Olck"] = process_ranges{
"Ol Chiki",
201688,
aliases = {"Ol Chemetʼ", "Ol", "Santali"},
ranges = {
0x1C50, 0x1C7F,
},
}
m["Onao"] = process_ranges{
"Ol Onal",
108607084,
"alphabet",
ranges = {
0x0964, 0x0965,
0x1E5D0, 0x1E5FA,
0x1E5FF, 0x1E5FF,
},
}
m["Orkh"] = process_ranges{
"Old Turkic",
5058305,
aliases = {"Orkhon runic"},
ranges = {
0x10C00, 0x10C48,
},
direction = "rtl",
translit = "Orkh-translit",
}
m["Orya"] = process_ranges{
"Odia",
1760127,
"abugida",
aliases = {"Oriya"},
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0B01, 0x0B03,
0x0B05, 0x0B0C,
0x0B0F, 0x0B10,
0x0B13, 0x0B28,
0x0B2A, 0x0B30,
0x0B32, 0x0B33,
0x0B35, 0x0B39,
0x0B3C, 0x0B44,
0x0B47, 0x0B48,
0x0B4B, 0x0B4D,
0x0B55, 0x0B57,
0x0B5C, 0x0B5D,
0x0B5F, 0x0B63,
0x0B66, 0x0B77,
0x1CDA, 0x1CDA,
0x1CF2, 0x1CF2,
},
normalizationFixes = handle_normalization_fixes{
from = {"ଅା", "ଏୗ", "ଓୗ"},
to = {"ଆ", "ଐ", "ଔ"}
},
}
m["Osge"] = process_ranges{
"Osage",
7105529,
ranges = {
0x104B0, 0x104D3,
0x104D8, 0x104FB,
},
capitalized = true,
}
m["Osma"] = process_ranges{
"Osmanya",
1377866,
ranges = {
0x10480, 0x1049D,
0x104A0, 0x104A9,
},
}
m["Ougr"] = process_ranges{
"Old Uyghur",
1998938,
"abjad, alphabet",
ranges = {
0x0640, 0x0640,
0x10AF2, 0x10AF2,
0x10F70, 0x10F89,
},
-- This should ideally be "vertical-ltr", but getting the CSS right is tricky because it's right-to-left horizontally, but left-to-right vertically. Currently, displaying it vertically causes it to display bottom-to-top.
direction = "rtl",
}
m["Palm"] = process_ranges{
"Palmyrene",
17538100,
ranges = {
0x10860, 0x1087F,
},
direction = "rtl",
}
m["Pauc"] = process_ranges{
"Pau Cin Hau",
25339852,
ranges = {
0x11AC0, 0x11AF8,
},
}
m["Pcun"] = {
"Proto-Cuneiform",
1650699,
"pictography",
-- Not in Unicode
}
m["Pelm"] = {
"Proto-Elamite",
56305763,
"pictography",
-- Not in Unicode
}
m["Perm"] = process_ranges{
"Old Permic",
147899,
ranges = {
0x0483, 0x0483,
0x10350, 0x1037A,
},
}
m["Phag"] = process_ranges{
"Phags-pa",
822836,
"abugida",
ranges = {
0x1802, 0x1803,
0x1805, 0x1805,
0x200C, 0x200D,
0x202F, 0x202F,
0x3002, 0x3002,
0xA840, 0xA877,
},
direction = "vertical-ltr",
}
m["Phli"] = process_ranges{
"Inscriptional Pahlavi",
24089793,
"abjad",
ranges = {
0x10B60, 0x10B72,
0x10B78, 0x10B7F,
},
direction = "rtl",
}
m["Phlp"] = process_ranges{
"Psalter Pahlavi",
7253954,
"abjad",
ranges = {
0x0640, 0x0640,
0x10B80, 0x10B91,
0x10B99, 0x10B9C,
0x10BA9, 0x10BAF,
},
direction = "rtl",
}
m["Phlv"] = {
"Book Pahlavi",
72403118,
"abjad",
direction = "rtl",
wikipedia_article = "Pahlavi scripts#Book Pahlavi",
-- Not in Unicode
}
m["Phnx"] = process_ranges{
"Phoenician",
26752,
"abjad",
ranges = {
0x10900, 0x1091B,
0x1091F, 0x1091F,
},
direction = "rtl",
translit = "Phnx-translit",
}
m["Plrd"] = process_ranges{
"Pollard",
601734,
"abugida",
aliases = {"Miao"},
ranges = {
0x16F00, 0x16F4A,
0x16F4F, 0x16F87,
0x16F8F, 0x16F9F,
},
}
m["Prti"] = process_ranges{
"Inscriptional Parthian",
13023804,
ranges = {
0x10B40, 0x10B55,
0x10B58, 0x10B5F,
},
direction = "rtl",
}
m["Psin"] = {
"Proto-Sinaitic",
1065250,
"abjad",
direction = "rtl",
-- Not in Unicode
}
m["Ranj"] = {
"Ranjana",
2385276,
"abugida",
-- Not in Unicode
}
m["Rjng"] = process_ranges{
"Rejang",
2007960,
"abugida",
ranges = {
0xA930, 0xA953,
0xA95F, 0xA95F,
},
}
m["Rohg"] = process_ranges{
"Hanifi Rohingya",
21028705,
"alphabet",
ranges = {
0x060C, 0x060C,
0x061B, 0x061B,
0x061F, 0x061F,
0x0640, 0x0640,
0x06D4, 0x06D4,
0x10D00, 0x10D27,
0x10D30, 0x10D39,
},
direction = "rtl",
}
m["Roro"] = {
"Rongorongo",
209764,
-- Not in Unicode
}
m["Rumin"] = process_ranges{
"Rumi numerals",
nil,
ranges = {
0x10E60, 0x10E7E,
},
ietf_subtag = "Arab",
}
m["Runr"] = process_ranges{
"Runic",
82996,
"alphabet",
ranges = {
0x16A0, 0x16EA,
0x16EE, 0x16F8,
},
}
do
-- The do...end block scopes Samr_stripdiacritics, shared between the
-- strip_diacritics and sort_key properties: CGJ plus the Samaritan
-- combining marks U+0816-U+082D are removed for both purposes.
local Samr_stripdiacritics = {
remove_diacritics = c.CGJ .. u(0x0816) .. "-" .. u(0x082D),
}
-- Samaritan script, used for Samaritan Hebrew and Samaritan Aramaic.
m["Samr"] = process_ranges{
"Samaritan",
1550930, -- Wikidata item ID
"abjad",
ranges = {
0x0800, 0x082D,
0x0830, 0x083E,
},
direction = "rtl",
strip_diacritics = Samr_stripdiacritics,
sort_key = Samr_stripdiacritics,
}
end
m["Sarb"] = process_ranges{
"Ancient South Arabian",
446074,
"abjad",
aliases = {"Old South Arabian"},
ranges = {
0x10A60, 0x10A7F,
},
direction = "rtl",
translit = "Sarb-translit",
}
m["Saur"] = process_ranges{
"Saurashtra",
3535165,
"abugida",
ranges = {
0xA880, 0xA8C5,
0xA8CE, 0xA8D9,
},
}
m["Semap"] = {
"flag semaphore",
250796,
"pictography",
ietf_subtag = "Zsym",
}
m["Sgnw"] = process_ranges{
"SignWriting",
1497335,
"pictography",
aliases = {"Sutton SignWriting"},
ranges = {
0x1D800, 0x1DA8B,
0x1DA9B, 0x1DA9F,
0x1DAA1, 0x1DAAF,
},
translit = false,
}
m["Shaw"] = process_ranges{
"Shavian",
1970098,
aliases = {"Shaw"},
ranges = {
0x10450, 0x1047F,
},
}
m["Shrd"] = process_ranges{
"Sharada",
2047117,
"abugida",
ranges = {
0x0951, 0x0951,
0x1CD7, 0x1CD7,
0x1CD9, 0x1CD9,
0x1CDC, 0x1CDD,
0x1CE0, 0x1CE0,
0xA830, 0xA835,
0xA838, 0xA838,
0x11180, 0x111DF,
},
translit = "Shrd-translit",
}
m["Shui"] = {
"Sui",
752854,
"logography",
spaces = false,
-- Not in Unicode
}
m["Sidd"] = process_ranges{
"Siddham",
250379,
"abugida",
ranges = {
0x11580, 0x115B5,
0x115B8, 0x115DD,
},
translit = "Sidd-translit",
}
m["Sidt"] = {
"Sidetic",
36659,
"alphabet",
direction = "rtl",
-- Not in Unicode
}
m["Sind"] = process_ranges{
"Khudabadi",
6402810,
"abugida",
aliases = {"Khudawadi"},
ranges = {
0x0964, 0x0965,
0xA830, 0xA839,
0x112B0, 0x112EA,
0x112F0, 0x112F9,
},
normalizationFixes = handle_normalization_fixes{
from = {"𑊰𑋠", "𑊰𑋥", "𑊰𑋦", "𑊰𑋧", "𑊰𑋨"},
to = {"𑊱", "𑊶", "𑊷", "𑊸", "𑊹"}
},
}
m["Sinh"] = process_ranges{
"Sinhalese",
1574992,
"abugida",
aliases = {"Sinhala"},
ranges = {
0x0964, 0x0965,
0x0D81, 0x0D83,
0x0D85, 0x0D96,
0x0D9A, 0x0DB1,
0x0DB3, 0x0DBB,
0x0DBD, 0x0DBD,
0x0DC0, 0x0DC6,
0x0DCA, 0x0DCA,
0x0DCF, 0x0DD4,
0x0DD6, 0x0DD6,
0x0DD8, 0x0DDF,
0x0DE6, 0x0DEF,
0x0DF2, 0x0DF4,
0x1CF2, 0x1CF2,
0x111E1, 0x111F4,
},
normalizationFixes = handle_normalization_fixes{
from = {"අා", "අැ", "අෑ", "උෟ", "ඍෘ", "ඏෟ", "එ්", "එෙ", "ඔෟ", "ෘෘ"},
to = {"ආ", "ඇ", "ඈ", "ඌ", "ඎ", "ඐ", "ඒ", "ඓ", "ඖ", "ෲ"}
},
}
m["Sogd"] = process_ranges{
"Sogdian",
578359,
"abjad",
ranges = {
0x0640, 0x0640,
0x10F30, 0x10F59,
},
direction = "rtl",
}
m["Sogo"] = process_ranges{
"Old Sogdian",
72403254,
"abjad",
ranges = {
0x10F00, 0x10F27,
},
direction = "rtl",
}
m["Sora"] = process_ranges{
"Sorang Sompeng",
7563292,
aliases = {"Sora Sompeng"},
ranges = {
0x110D0, 0x110E8,
0x110F0, 0x110F9,
},
}
m["Soyo"] = process_ranges{
"Soyombo",
8009382,
"abugida",
ranges = {
0x11A50, 0x11AA2,
},
}
m["Sund"] = process_ranges{
"Sundanese",
51589,
"abugida",
ranges = {
0x1B80, 0x1BBF,
0x1CC0, 0x1CC7,
},
}
m["Sunu"] = process_ranges{
"Sunuwar",
109984965,
"alphabet",
ranges = {
0x11BC0, 0x11BE1,
0x11BF0, 0x11BF9,
},
}
m["Sylo"] = process_ranges{
"Sylheti Nagri",
144128,
"abugida",
aliases = {"Sylheti Nāgarī", "Syloti Nagri"},
ranges = {
0x0964, 0x0965,
0x09E6, 0x09EF,
0xA800, 0xA82C,
},
}
m["Syrc"] = process_ranges{
"Syriac",
26567,
"abjad", -- more precisely, impure abjad
ranges = {
0x060C, 0x060C,
0x061B, 0x061C,
0x061F, 0x061F,
0x0640, 0x0640,
0x064B, 0x0655,
0x0670, 0x0670,
0x0700, 0x070D,
0x070F, 0x074A,
0x074D, 0x074F,
0x0860, 0x086A,
0x1DF8, 0x1DF8,
0x1DFA, 0x1DFA,
},
direction = "rtl",
}
-- Syre, Syrj, Syrn are apparently subsumed into Syrc; discuss if this causes issues
m["Tagb"] = process_ranges{
"Tagbanwa",
977444,
"abugida",
ranges = {
0x1735, 0x1736,
0x1760, 0x176C,
0x176E, 0x1770,
0x1772, 0x1773,
},
}
m["Takr"] = process_ranges{
"Takri",
759202,
"abugida",
ranges = {
0x0964, 0x0965,
0xA830, 0xA839,
0x11680, 0x116B9,
0x116C0, 0x116C9,
},
normalizationFixes = handle_normalization_fixes{
from = {"𑚀𑚭", "𑚀𑚴", "𑚀𑚵", "𑚆𑚲"},
to = {"𑚁", "𑚈", "𑚉", "𑚇"}
},
}
m["Tale"] = process_ranges{
"Tai Nüa",
2566326,
"abugida",
aliases = {"Tai Nuea", "New Tai Nüa", "New Tai Nuea", "Dehong Dai", "Tai Dehong", "Tai Le"},
ranges = {
0x1040, 0x1049,
0x1950, 0x196D,
0x1970, 0x1974,
},
spaces = false,
}
m["Talu"] = process_ranges{
"New Tai Lue",
3498863,
"abugida",
ranges = {
0x1980, 0x19AB,
0x19B0, 0x19C9,
0x19D0, 0x19DA,
0x19DE, 0x19DF,
},
spaces = false,
}
m["Taml"] = process_ranges{
"Tamil",
26803,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0B82, 0x0B83,
0x0B85, 0x0B8A,
0x0B8E, 0x0B90,
0x0B92, 0x0B95,
0x0B99, 0x0B9A,
0x0B9C, 0x0B9C,
0x0B9E, 0x0B9F,
0x0BA3, 0x0BA4,
0x0BA8, 0x0BAA,
0x0BAE, 0x0BB9,
0x0BBE, 0x0BC2,
0x0BC6, 0x0BC8,
0x0BCA, 0x0BCD,
0x0BD0, 0x0BD0,
0x0BD7, 0x0BD7,
0x0BE6, 0x0BFA,
0x1CDA, 0x1CDA,
0xA8F3, 0xA8F3,
0x11301, 0x11301,
0x11303, 0x11303,
0x1133B, 0x1133C,
0x11FC0, 0x11FF1,
0x11FFF, 0x11FFF,
},
normalizationFixes = handle_normalization_fixes{
from = {"அூ", "ஸ்ரீ"},
to = {"ஆ", "ஶ்ரீ"}
},
}
m["Tang"] = process_ranges{
"Tangut",
1373610,
"logography, syllabary",
ranges = {
0x31EF, 0x31EF,
0x16FE0, 0x16FE0,
0x17000, 0x187F7,
0x18800, 0x18AFF,
0x18D00, 0x18D08,
},
spaces = false,
translit = "txg-translit",
}
m["Tavt"] = process_ranges{
"Tai Viet",
11818517,
"abugida",
ranges = {
0xAA80, 0xAAC2,
0xAADB, 0xAADF,
},
spaces = false,
}
m["Tayo"] = process_ranges{
"Lai Tay",
16306701,
"abugida",
aliases = {"Tai Yo"},
direction = "vertical-rtl",
ranges = {
0x1E6C0, 0x1E6DE,
0x1E6E0, 0x1E6F5,
0x1E6FE, 0x1E6FF,
},
spaces = false,
}
m["Telu"] = process_ranges{
"Telugu",
570450,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x0C00, 0x0C0C,
0x0C0E, 0x0C10,
0x0C12, 0x0C28,
0x0C2A, 0x0C39,
0x0C3C, 0x0C44,
0x0C46, 0x0C48,
0x0C4A, 0x0C4D,
0x0C55, 0x0C56,
0x0C58, 0x0C5A,
0x0C5D, 0x0C5D,
0x0C60, 0x0C63,
0x0C66, 0x0C6F,
0x0C77, 0x0C7F,
0x1CDA, 0x1CDA,
0x1CF2, 0x1CF2,
},
normalizationFixes = handle_normalization_fixes{
from = {"ఒౌ", "ఒౕ", "ిౕ", "ెౕ", "ొౕ"},
to = {"ఔ", "ఓ", "ీ", "ే", "ో"}
},
}
m["Teng"] = {
"Tengwar",
473725,
}
m["Tfng"] = process_ranges{
"Tifinagh",
208503,
"abjad, alphabet",
ranges = {
0x2D30, 0x2D67,
0x2D6F, 0x2D70,
0x2D7F, 0x2D7F,
},
other_names = {"Libyco-Berber", "Berber"}, -- per Wikipedia, Libyco-Berber is the parent
}
m["Tglg"] = process_ranges{
"Baybayin",
812124,
"abugida",
aliases = {"Tagalog"},
varieties = {"Kur-itan"},
ranges = {
0x1700, 0x1715,
0x171F, 0x171F,
0x1735, 0x1736,
},
}
m["Thaa"] = process_ranges{
"Thaana",
877906,
"abugida",
ranges = {
0x060C, 0x060C,
0x061B, 0x061C,
0x061F, 0x061F,
0x0660, 0x0669,
0x0780, 0x07B1,
0xFDF2, 0xFDF2,
0xFDFD, 0xFDFD,
},
direction = "rtl",
}
m["Thai"] = process_ranges{
"Thai",
236376,
"abugida",
ranges = {
0x0E01, 0x0E3A,
0x0E40, 0x0E5B,
},
spaces = false,
}
do
-- The do...end block scopes Tibt_displaytext, a substitution table shared by
-- the display_text and strip_diacritics properties of m["Tibt"]: it expands
-- precomposed/abbreviated forms into their canonical character sequences.
local Tibt_displaytext = {
from = {"ༀ", "༌", "།།", "༚༚", "༚༝", "༝༚", "༝༝", "ཷ", "ཹ", "ེེ", "ོོ"},
to = {"ཨོཾ", "་", "༎", "༛", "༟", "࿎", "༞", "ྲཱྀ", "ླཱྀ", "ཻ", "ཽ"}
}
-- Tibetan script.
m["Tibt"] = process_ranges{
"Tibetan",
46861, -- Wikidata item ID
"abugida",
ranges = {
0x0F00, 0x0F47,
0x0F49, 0x0F6C,
0x0F71, 0x0F97,
0x0F99, 0x0FBC,
0x0FBE, 0x0FCC,
0x0FCE, 0x0FD4,
0x0FD9, 0x0FDA,
0x3008, 0x300B,
},
normalizationFixes = handle_normalization_fixes{
-- combiningClasses overrides the combining class of U+0F39 for
-- normalization purposes; the from/to pairs decompose two
-- precomposed vowel signs.
combiningClasses = {["༹"] = 1},
from = {"ཷ", "ཹ"},
to = {"ྲཱྀ", "ླཱྀ"}
},
display_text = Tibt_displaytext,
strip_diacritics = Tibt_displaytext,
sort_key = "Tibt-sortkey",
translit = "Tibt-translit",
}
m["sit-tam-Tibt"] = {
"Tamyig",
109875213, -- Wikidata item ID
m["Tibt"][3], -- "abugida"
-- There is no inheritance of properties currently implemented for scripts. Per [[User:Theknightwho]], this
-- is because it's tricky to do since there are several types of child scripts: those that are mere display
-- variants (like fa-Arab, kk-Arab), which should be eliminated in favor of CSS language selectors to
-- handle the font differences; those that are genuinely different scripts that happen to share the same
-- Unicode codepoints but have mostly different properties (e.g. Manchu vs. Mongolian); and those that are
-- somewhere in between (like Tamyig vs. Tibetan). As a result, we currently have to manually specify
-- which properties we want inherited as follows.
ranges = m["Tibt"].ranges,
characters = m["Tibt"].characters,
parent = "Tibt",
normalizationFixes = m["Tibt"].normalizationFixes,
display_text = m["Tibt"].display_text,
strip_diacritics = m["Tibt"].strip_diacritics,
sort_key = m["Tibt"].sort_key,
translit = m["Tibt"].translit,
}
end
m["Tirh"] = process_ranges{
"Tirhuta",
1765752,
"abugida",
ranges = {
0x0951, 0x0952,
0x0964, 0x0965,
0x1CF2, 0x1CF2,
0xA830, 0xA839,
0x11480, 0x114C7,
0x114D0, 0x114D9,
},
normalizationFixes = handle_normalization_fixes{
from = {"𑒁𑒰", "𑒋𑒺", "𑒍𑒺", "𑒪𑒵", "𑒪𑒶"},
to = {"𑒂", "𑒌", "𑒎", "𑒉", "𑒊"}
},
}
m["Tnsa"] = process_ranges{
"Tangsa",
105576311,
"alphabet",
ranges = {
0x16A70, 0x16ABE,
0x16AC0, 0x16AC9,
},
}
m["Todr"] = process_ranges{
"Todhri",
10274731,
"alphabet",
direction = "rtl",
ranges = {
0x105C0, 0x105F3,
},
}
m["Tols"] = {
"Tolong Siki",
4459822,
"alphabet",
-- Not in Unicode
}
m["Toto"] = process_ranges{
"Toto",
104837516,
"abugida",
ranges = {
0x1E290, 0x1E2AE,
},
}
m["Tutg"] = process_ranges{
"Tigalari",
2604990,
"abugida",
aliases = {"Tulu"},
ranges = {
0x1CF2, 0x1CF2,
0x1CF4, 0x1CF4,
0xA8F1, 0xA8F1,
0x11380, 0x11389,
0x1138B, 0x1138B,
0x1138E, 0x1138E,
0x11390, 0x113B5,
0x113B7, 0x113C0,
0x113C2, 0x113C2,
0x113C5, 0x113C5,
0x113C7, 0x113CA,
0x113CC, 0x113D5,
0x113D7, 0x113D8,
0x113E1, 0x113E2,
},
}
m["Ugar"] = process_ranges{
"Ugaritic",
332652,
"abjad",
ranges = {
0x10380, 0x1039D,
0x1039F, 0x1039F,
},
}
m["Vaii"] = process_ranges{
"Vai",
523078,
"syllabary",
ranges = {
0xA500, 0xA62B,
},
}
m["Visp"] = {
"Visible Speech",
1303365,
"alphabet",
-- Not in Unicode
}
m["Vith"] = process_ranges{
"Vithkuqi",
3301993,
"alphabet",
ranges = {
0x10570, 0x1057A,
0x1057C, 0x1058A,
0x1058C, 0x10592,
0x10594, 0x10595,
0x10597, 0x105A1,
0x105A3, 0x105B1,
0x105B3, 0x105B9,
0x105BB, 0x105BC,
},
capitalized = true,
}
m["Wara"] = process_ranges{
"Varang Kshiti",
79199,
aliases = {"Warang Citi"},
ranges = {
0x118A0, 0x118F2,
0x118FF, 0x118FF,
},
capitalized = true,
}
m["Wcho"] = process_ranges{
"Wancho",
33713728,
"alphabet",
ranges = {
0x1E2C0, 0x1E2F9,
0x1E2FF, 0x1E2FF,
},
}
m["Wole"] = {
"Woleai",
6643710,
"syllabary",
-- Not in Unicode
}
m["Xpeo"] = process_ranges{
"Old Persian",
1471822,
ranges = {
0x103A0, 0x103C3,
0x103C8, 0x103D5,
},
}
m["Xsux"] = process_ranges{
"Cuneiform",
401,
aliases = {"Sumero-Akkadian Cuneiform"},
ranges = {
0x12000, 0x12399,
0x12400, 0x1246E,
0x12470, 0x12474,
0x12480, 0x12543,
},
}
m["Yezi"] = process_ranges{
"Yezidi",
13175481,
"alphabet",
ranges = {
0x060C, 0x060C,
0x061B, 0x061B,
0x061F, 0x061F,
0x0660, 0x0669,
0x10E80, 0x10EA9,
0x10EAB, 0x10EAD,
0x10EB0, 0x10EB1,
},
direction = "rtl",
}
m["Yiii"] = process_ranges{
"Yi",
1197646,
"syllabary",
ranges = {
0x3001, 0x3002,
0x3008, 0x3011,
0x3014, 0x301B,
0x30FB, 0x30FB,
0xA000, 0xA48C,
0xA490, 0xA4C6,
0xFF61, 0xFF65,
},
}
m["Zanb"] = process_ranges{
"Zanabazar Square",
50809208,
"abugida",
ranges = {
0x11A00, 0x11A47,
},
}
m["Zmth"] = process_ranges{
"mathematical notation",
1140046,
ranges = {
0x00AC, 0x00AC,
0x00B1, 0x00B1,
0x00D7, 0x00D7,
0x00F7, 0x00F7,
0x03D0, 0x03D2,
0x03D5, 0x03D5,
0x03F0, 0x03F1,
0x03F4, 0x03F6,
0x0606, 0x0608,
0x2016, 0x2016,
0x2032, 0x2034,
0x2040, 0x2040,
0x2044, 0x2044,
0x2052, 0x2052,
0x205F, 0x205F,
0x2061, 0x2064,
0x207A, 0x207E,
0x208A, 0x208E,
0x20D0, 0x20DC,
0x20E1, 0x20E1,
0x20E5, 0x20E6,
0x20EB, 0x20EF,
0x2102, 0x2102,
0x2107, 0x2107,
0x210A, 0x2113,
0x2115, 0x2115,
0x2118, 0x211D,
0x2124, 0x2124,
0x2128, 0x2129,
0x212C, 0x212D,
0x212F, 0x2131,
0x2133, 0x2138,
0x213C, 0x2149,
0x214B, 0x214B,
0x2190, 0x21A7,
0x21A9, 0x21AE,
0x21B0, 0x21B1,
0x21B6, 0x21B7,
0x21BC, 0x21DB,
0x21DD, 0x21DD,
0x21E4, 0x21E5,
0x21F4, 0x22FF,
0x2308, 0x230B,
0x2320, 0x2321,
0x237C, 0x237C,
0x239B, 0x23B5,
0x23B7, 0x23B7,
0x23D0, 0x23D0,
0x23DC, 0x23E2,
0x25A0, 0x25A1,
0x25AE, 0x25B7,
0x25BC, 0x25C1,
0x25C6, 0x25C7,
0x25CA, 0x25CB,
0x25CF, 0x25D3,
0x25E2, 0x25E2,
0x25E4, 0x25E4,
0x25E7, 0x25EC,
0x25F8, 0x25FF,
0x2605, 0x2606,
0x2640, 0x2640,
0x2642, 0x2642,
0x2660, 0x2663,
0x266D, 0x266F,
0x27C0, 0x27FF,
0x2900, 0x2AFF,
0x2B30, 0x2B44,
0x2B47, 0x2B4C,
0xFB29, 0xFB29,
0xFE61, 0xFE66,
0xFE68, 0xFE68,
0xFF0B, 0xFF0B,
0xFF1C, 0xFF1E,
0xFF3C, 0xFF3C,
0xFF3E, 0xFF3E,
0xFF5C, 0xFF5C,
0xFF5E, 0xFF5E,
0xFFE2, 0xFFE2,
0xFFE9, 0xFFEC,
0x1D400, 0x1D454,
0x1D456, 0x1D49C,
0x1D49E, 0x1D49F,
0x1D4A2, 0x1D4A2,
0x1D4A5, 0x1D4A6,
0x1D4A9, 0x1D4AC,
0x1D4AE, 0x1D4B9,
0x1D4BB, 0x1D4BB,
0x1D4BD, 0x1D4C3,
0x1D4C5, 0x1D505,
0x1D507, 0x1D50A,
0x1D50D, 0x1D514,
0x1D516, 0x1D51C,
0x1D51E, 0x1D539,
0x1D53B, 0x1D53E,
0x1D540, 0x1D544,
0x1D546, 0x1D546,
0x1D54A, 0x1D550,
0x1D552, 0x1D6A5,
0x1D6A8, 0x1D7CB,
0x1D7CE, 0x1D7FF,
0x1EE00, 0x1EE03,
0x1EE05, 0x1EE1F,
0x1EE21, 0x1EE22,
0x1EE24, 0x1EE24,
0x1EE27, 0x1EE27,
0x1EE29, 0x1EE32,
0x1EE34, 0x1EE37,
0x1EE39, 0x1EE39,
0x1EE3B, 0x1EE3B,
0x1EE42, 0x1EE42,
0x1EE47, 0x1EE47,
0x1EE49, 0x1EE49,
0x1EE4B, 0x1EE4B,
0x1EE4D, 0x1EE4F,
0x1EE51, 0x1EE52,
0x1EE54, 0x1EE54,
0x1EE57, 0x1EE57,
0x1EE59, 0x1EE59,
0x1EE5B, 0x1EE5B,
0x1EE5D, 0x1EE5D,
0x1EE5F, 0x1EE5F,
0x1EE61, 0x1EE62,
0x1EE64, 0x1EE64,
0x1EE67, 0x1EE6A,
0x1EE6C, 0x1EE72,
0x1EE74, 0x1EE77,
0x1EE79, 0x1EE7C,
0x1EE7E, 0x1EE7E,
0x1EE80, 0x1EE89,
0x1EE8B, 0x1EE9B,
0x1EEA1, 0x1EEA3,
0x1EEA5, 0x1EEA9,
0x1EEAB, 0x1EEBB,
0x1EEF0, 0x1EEF1,
},
translit = false,
}
m["Zname"] = process_ranges{
"Znamenny musical notation",
965834,
"pictography",
ranges = {
0x1CF00, 0x1CF2D,
0x1CF30, 0x1CF46,
0x1CF50, 0x1CFC3,
},
ietf_subtag = "Zsym",
translit = false,
}
m["Zsym"] = process_ranges{
"symbolic",
80071,
"pictography",
ranges = {
0x20DD, 0x20E0,
0x20E2, 0x20E4,
0x20E7, 0x20EA,
0x20F0, 0x20F0,
0x2100, 0x2101,
0x2103, 0x2106,
0x2108, 0x2109,
0x2114, 0x2114,
0x2116, 0x2117,
0x211E, 0x2123,
0x2125, 0x2127,
0x212A, 0x212B,
0x212E, 0x212E,
0x2132, 0x2132,
0x2139, 0x213B,
0x214A, 0x214A,
0x214C, 0x214F,
0x21A8, 0x21A8,
0x21AF, 0x21AF,
0x21B2, 0x21B5,
0x21B8, 0x21BB,
0x21DC, 0x21DC,
0x21DE, 0x21E3,
0x21E6, 0x21F3,
0x2300, 0x2307,
0x230C, 0x231F,
0x2322, 0x237B,
0x237D, 0x239A,
0x23B6, 0x23B6,
0x23B8, 0x23CF,
0x23D1, 0x23DB,
0x23E3, 0x23FF,
0x2500, 0x259F,
0x25A2, 0x25AD,
0x25B8, 0x25BB,
0x25C2, 0x25C5,
0x25C8, 0x25C9,
0x25CC, 0x25CE,
0x25D4, 0x25E1,
0x25E3, 0x25E3,
0x25E5, 0x25E6,
0x25ED, 0x25F7,
0x2600, 0x2604,
0x2607, 0x263F,
0x2641, 0x2641,
0x2643, 0x265F,
0x2664, 0x266C,
0x2670, 0x27BF,
0x2B00, 0x2B2F,
0x2B45, 0x2B46,
0x2B4D, 0x2B73,
0x2B76, 0x2B95,
0x2B97, 0x2BFF,
0x4DC0, 0x4DFF,
0x1F000, 0x1F02B,
0x1F030, 0x1F093,
0x1F0A0, 0x1F0AE,
0x1F0B1, 0x1F0BF,
0x1F0C1, 0x1F0CF,
0x1F0D1, 0x1F0F5,
0x1F300, 0x1F6D7,
0x1F6DC, 0x1F6EC,
0x1F6F0, 0x1F6FC,
0x1F700, 0x1F776,
0x1F77B, 0x1F7D9,
0x1F7E0, 0x1F7EB,
0x1F7F0, 0x1F7F0,
0x1F800, 0x1F80B,
0x1F810, 0x1F847,
0x1F850, 0x1F859,
0x1F860, 0x1F887,
0x1F890, 0x1F8AD,
0x1F8B0, 0x1F8B1,
0x1F900, 0x1FA53,
0x1FA60, 0x1FA6D,
0x1FA70, 0x1FA7C,
0x1FA80, 0x1FA88,
0x1FA90, 0x1FABD,
0x1FABF, 0x1FAC5,
0x1FACE, 0x1FADB,
0x1FAE0, 0x1FAE8,
0x1FAF0, 0x1FAF8,
0x1FB00, 0x1FB92,
0x1FB94, 0x1FBCA,
0x1FBF0, 0x1FBF9,
},
translit = false,
character_category = false, -- none
}
m["Zyyy"] = {
"undetermined",
104839687,
-- This should not have any characters listed, probably
translit = false,
character_category = false, -- none
}
m["Zzzz"] = {
"uncoded",
104839675,
-- This should not have any characters listed
translit = false,
character_category = false, -- none
}
-- These should be defined after the scripts they are composed of.
-- Composite scripts: their ranges are the union of the component scripts'
-- ranges, so the component entries must already exist in `m`.
m["Hrkt"] = process_ranges{
"Kana",
187659,
"syllabary",
aliases = {"Japanese syllabaries"},
-- Hiragana + Katakana combined.
ranges = union(
m["Hira"].ranges,
m["Kana"].ranges
),
spaces = false,
}
m["Jpan"] = process_ranges{
"Japanese",
190502,
"logography, syllabary",
-- Kana (Hrkt) + Han characters + Latin.
ranges = union(
m["Hrkt"].ranges,
m["Hani"].ranges,
m["Latn"].ranges
),
spaces = false,
sort_by_scraping = true,
}
m["Kore"] = process_ranges{
"Korean",
711797,
"logography, syllabary",
-- Hangul + Han characters + Latin.
ranges = union(
m["Hang"].ranges,
m["Hani"].ranges,
m["Latn"].ranges
),
-- `漢字(한자)`→`漢字`
-- `가-나-다`→`가나다`, `가--나--다`→`가-나-다`
-- `온돌(溫突/溫堗)`→`온돌` ([[ondol]])
strip_diacritics = {
-- U+302E/U+302F are the Hangul single/double tone marks.
remove_diacritics = u(0x302E) .. u(0x302F),
-- NOTE(review): `from` has six patterns but `to` only five replacements;
-- presumably a missing `to` entry means "replace with the empty string"
-- (dropping the parenthesised alternatives matched by the sixth pattern) --
-- confirm against the consumer of strip_diacritics.
from = {"([" .. m["Hani"].characters .. "])%(.-%)", "^%-", "%-$", "%-(%-?)", "\1", "%([" .. m["Hani"].characters .. "/]+%)"},
to = {"%1", "\1", "\1", "%1", "-"}
}
}
return require("Module:languages").finalizeData(m, "script")
t9heizlomeu0qx9seby1qxmszj6ranz
মডিউল:translations
828
7286
507790
325977
2026-04-14T06:47:50Z
Redmin
6857
[[en:Module:translations|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507790
Scribunto
text/plain
local export = {}
local anchors_module = "Module:anchors"
local debug_track_module = "Module:debug/track"
local languages_module = "Module:languages"
local links_module = "Module:links"
local pages_module = "Module:pages"
local parameters_module = "Module:parameters"
local string_utilities_module = "Module:string utilities"
local templatestyles_module = "Module:TemplateStyles"
local utilities_module = "Module:utilities"
local wikimedia_languages_module = "Module:wikimedia languages"
local concat = table.concat
local html_create = mw.html.create
local insert = table.insert
local load_data = mw.loadData
local new_title = mw.title.new
local require = require
--[==[
Loaders for functions in other modules, which overwrite themselves with the target function when called. This ensures modules are only loaded when needed, retains the speed/convenience of locally-declared pre-loaded functions, and has no overhead after the first call, since the target functions are called directly in any subsequent calls.]==]
local function decode_uri(...)
decode_uri = require(string_utilities_module).decode_uri
return decode_uri(...)
end
local function format_categories(...)
format_categories = require(utilities_module).format_categories
return format_categories(...)
end
local function full_link(...)
full_link = require(links_module).full_link
return full_link(...)
end
local function get_link_page(...)
get_link_page = require(links_module).get_link_page
return get_link_page(...)
end
local function get_wikimedia_lang(...)
get_wikimedia_lang = require(wikimedia_languages_module).getByCode
return get_wikimedia_lang(...)
end
local function language_link(...)
language_link = require(links_module).language_link
return language_link(...)
end
local function normalize_anchor(...)
normalize_anchor = require(anchors_module).normalize_anchor
return normalize_anchor(...)
end
local function plain_link(...)
plain_link = require(links_module).plain_link
return plain_link(...)
end
local function process_params(...)
process_params = require(parameters_module).process
return process_params(...)
end
local function remove_links(...)
remove_links = require(links_module).remove_links
return remove_links(...)
end
local function split_on_slashes(...)
split_on_slashes = require(links_module).split_on_slashes
return split_on_slashes(...)
end
-- The next two modules evidently return a callable value directly (no field
-- is selected after require(); the result is then called).
local function templatestyles(...)
templatestyles = require(templatestyles_module)
return templatestyles(...)
end
local function track(...)
track = require(debug_track_module)
return track(...)
end
--[==[
Loaders for objects, which load data (or some other object) into some variable, which can then be accessed as "foo or get_foo()", where the function get_foo sets the object to "foo" and then returns it. This ensures they are only loaded when needed, and avoids the need to check for the existence of the object each time, since once "foo" has been set, "get_foo" will not be called again.]==]
local en
local function get_en()
en, get_en = require(languages_module).getByCode("en"), nil
return en
end
local headword_data
local function get_headword_data()
headword_data, get_headword_data = load_data("Module:headword/data"), nil
return headword_data
end
local parameters_data
local function get_parameters_data()
parameters_data, get_parameters_data = load_data("Module:parameters/data"), nil
return parameters_data
end
local translations_data
local function get_translations_data()
translations_data, get_translations_data = load_data("Module:translations/data"), nil
return translations_data
end
-- True when `pagename` (defaulting to the current page) is a mainspace page
-- whose title ends in "/translations".
local function is_translation_subpage(pagename)
	local data = headword_data or get_headword_data()
	-- Translation subpages only exist in the main namespace.
	if data.page.namespace ~= "" then
		return false
	end
	local name = pagename or data.encoded_pagename
	-- The leading "." requires at least one character before the suffix.
	return name:match("./translations$") ~= nil
end
-- Name of the main entry: the current page, minus any trailing
-- "/translations" (13 characters, hence sub(1, -14)).
local function canonical_pagename()
	local pagename = (headword_data or get_headword_data()).encoded_pagename
	if is_translation_subpage(pagename) then
		return pagename:sub(1, -14)
	end
	return pagename
end
-- Build the small "(xx)" link to the foreign-language Wiktionary for `term`
-- and store the rendered HTML in `terminfo.interwiki`, or set it to `false`
-- when no link should be shown. Only called when `terminfo.interwiki` is
-- truthy (see export.show_terminfo).
local function interwiki(terminfo, term, lang, langcode)
-- No interwiki link if term is empty/missing
if not term or #term < 1 then
terminfo.interwiki = false
return
end
-- Percent-decode the term.
term = decode_uri(terminfo.term, "PATH")
-- Don't show an interwiki link if it's an invalid title.
if not new_title(term) then
terminfo.interwiki = false
return
end
-- Some of our language codes map to a different Wikimedia code (via
-- interwiki_langs in [[Module:translations/data]]); otherwise ask the
-- language object for its Wikimedia languages.
local interwiki_langcode = (translations_data or get_translations_data()).interwiki_langs[langcode]
local wmlangs = interwiki_langcode and {get_wikimedia_lang(interwiki_langcode)} or lang:getWikimediaLanguages()
-- Don't show the interwiki link if the language is not recognised by Wikimedia.
if #wmlangs == 0 then
terminfo.interwiki = false
return
end
local sc = terminfo.sc
local target_page = get_link_page(term, lang, sc)
-- NOTE(review): split_on_slashes semantics assumed from [[Module:links]];
-- no usable first segment means nothing to link to.
local split = split_on_slashes(target_page)
if not split[1] then
terminfo.interwiki = false
return
end
target_page = split[1]
-- Link through the first Wikimedia language's code, e.g. "de:Haus".
local wmlangcode = wmlangs[1]:getCode()
local interwiki_link = language_link{
lang = lang,
sc = sc,
term = wmlangcode .. ":" .. target_page,
alt = "(" .. wmlangcode .. ")",
tr = "-"
}
terminfo.interwiki = tostring(html_create("span")
:addClass("tpos")
:wikitext(" " .. interwiki_link)
)
end
-- Core renderer shared by {{t}}, {{t+}}, {{t-check}} and {{t+check}}:
-- validates the language, optionally builds the interwiki link, formats the
-- translation link and returns it together with tracking categories.
-- `check` marks the translation as needing verification.
function export.show_terminfo(terminfo, check)
local lang = terminfo.lang
local langcode, langname = lang:getCode(), lang:getCanonicalName()
-- Translations must be for mainspace languages.
if not lang:hasType("regular") then
error("Translations must be for attested and approved main-namespace languages.")
else
-- Some languages are banned from translation tables; the data module
-- supplies a per-language explanation used in the error message.
local disallowed = (translations_data or get_translations_data()).disallowed
local err_msg = disallowed[langcode]
if err_msg then
error("Translations not allowed in " .. langname .. " (" .. langcode .. "). " .. langname .. " translations should " .. err_msg)
end
-- Also check the full code when it differs (e.g. a variety of a parent language).
local fullcode = lang:getFullCode()
if fullcode ~= langcode then
err_msg = disallowed[fullcode]
if err_msg then
langname = lang:getFullName()
error("Translations not allowed in " .. langname .. " (" .. fullcode .. "). " .. langname .. " translations should " .. err_msg)
end
end
end
-- English translations are only allowed under Translingual L2 sections
-- (or in the Wiktionary namespace).
if langcode == "en" then
if terminfo.interwiki then
error("Interwiki translations not allowed for English; they should always link to a different Wiktionary")
end
local current_L2 = require(pages_module).get_current_L2()
if current_L2 ~= "Translingual" and mw.title.getCurrentTitle().nsText ~= "Wiktionary" then
if current_L2 then
error("English translations only allowed in Translingual section, not in " .. current_L2)
else
error("English translations only allowed in Translingual section, not outside of any L2")
end
end
end
local term = terminfo.term
-- Check if there is a term. Don't show the interwiki link if there is nothing to link to.
if not term then
-- Track entries that don't provide a term.
-- FIXME: This should be a category.
track("translations/no term")
track("translations/no term/" .. langcode)
end
if terminfo.interwiki then
interwiki(terminfo, term, lang, langcode)
end
-- For languages listed in need_super, wrap digit sequences in the
-- transliteration in <sup> tags (%f is the frontier pattern).
langcode = lang:getFullCode()
if (translations_data or get_translations_data()).need_super[langcode] then
local tr = terminfo.tr
if tr ~= nil then
terminfo.tr = tr:gsub("%d[%d%*%-]*%f[^%d%*]", "<sup>%0</sup>")
end
end
terminfo.show_qualifiers = true
local link = full_link(terminfo, "translation")
-- Categorize under both the specific and the full language name when they differ.
local canonical_name = lang:getCanonicalName()
local full_name = lang:getFullName()
local categories = {"Terms with " .. canonical_name .. " translations"}
if canonical_name ~= full_name then
insert(categories, "Terms with " .. full_name .. " translations")
end
if check then
-- {{t-check}}/{{t+check}}: prefix a "(please verify)" superscript.
link = tostring(html_create("span")
:addClass("ttbc")
:tag("sup")
:addClass("ttbc")
:wikitext("(please [[WT:Translations#Translations to be checked|verify]])")
:done()
:wikitext(" " .. link)
)
insert(categories, "Requests for review of " .. langname .. " translations")
end
return link .. format_categories(categories, en or get_en(), nil, canonical_pagename())
end
-- Implements {{t}}, {{t+}}, {{t-check}} and {{t+check}}.
function export.show(frame)
	-- Resolve the parameter spec for the {{t}} family, then parse the args.
	local spec = (parameters_data or get_parameters_data())["translation"]
	local args = process_params(frame:getParent().args, spec)
	-- Collect everything show_terminfo needs into a single table.
	local terminfo = {
		lang = args[1],
		sc = args.sc,
		track_sc = true,
		term = args[2],
		alt = args.alt,
		id = args.id,
		genders = args[3],
		tr = args.tr,
		ts = args.ts,
		lit = args.lit,
		q = args.q,
		qq = args.qq,
		l = args.l,
		ll = args.ll,
		refs = args.ref,
		interwiki = frame.args.interwiki,
	}
	-- {{t-check}}/{{t+check}} pass a non-empty |check= via the frame args.
	local check_flag = frame.args.check
	return export.show_terminfo(terminfo, check_flag and check_flag ~= "")
end
-- Give `div` an "id" attribute of the form "Translations-<id>" so that
-- {{trans-see}} links can target it. No-op when `id` is nil/false.
local function add_id(div, id)
	if not id then
		return div
	end
	return div:attr("id", normalize_anchor("Translations-" .. id))
end
-- Implements {{trans-top}} and part of {{trans-top-also}}.
-- Builds the collapsible NavFrame and the OPENING portion of the translations
-- table; the matching closing markup is emitted later by export.bottom
-- ({{trans-bottom}}).
local function top(args, title, id, navhead)
local column_width = (args["column-width"] == "wide" or args["column-width"] == "narrow") and "-" .. args["column-width"] or ""
local div = html_create("div")
:addClass("NavFrame")
:node(navhead)
:tag("div")
:addClass("NavContent")
:tag("table")
:addClass("translations")
:attr("role", "presentation")
:attr("data-gloss", title or "")
:tag("tr")
:tag("td")
:addClass("translations-cell")
:addClass("multicolumn-list" .. column_width)
:attr("colspan", "3")
:allDone()
div = add_id(div, id)
local categories = {}
if not title then
insert(categories, "Translation table header lacks gloss")
end
local pagename = canonical_pagename()
if is_translation_subpage() then
insert(categories, "Translation subpages")
end
-- The gsub strips the closing tags that mw.html generated, deliberately
-- leaving the table open so the translation list can follow in wikitext.
return (tostring(div):gsub("</td></tr></table></div></div>$", "")) ..
(#categories > 0 and format_categories(categories, en or get_en(), nil, pagename) or "") ..
-- Category to trigger [[MediaWiki:Gadget-TranslationAdder.js]]; we want this even on
-- user pages and such.
format_categories("Entries with translation boxes", nil, nil, nil, true) ..
templatestyles("Module:translations/styles.css")
end
-- Entry point for {{trans-top}}.
function export.top(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-top"])
	local gloss = args[1]
	-- The anchor id defaults to the raw (unstripped) gloss.
	local id = args.id or gloss
	if gloss then
		gloss = remove_links(gloss)
	end
	local navhead = html_create("div")
		:addClass("NavHead")
		:css("text-align", "left")
		:wikitext(gloss or "Translations")
	return top(args, gloss, id, navhead)
end
-- Entry point for {{checktrans-top}}.
-- Renders the explanatory "translations below need to be checked" banner,
-- then a translation table header via top().
function export.check_top(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["checktrans-top"])
local text = "\n:''The translations below need to be checked and inserted above into the appropriate translation tables. See instructions at " ..
frame:expandTemplate{
title = "section link",
args = {"Wiktionary:Entry layout#Translations"}
} ..
".''\n"
local header = html_create("div")
:addClass("checktrans")
:wikitext(text)
local subtitle = args[1]
local title = "Translations to be checked"
if subtitle then
-- NOTE(review): the string below contains an invisible U+200C (zero-width
-- non-joiner) before the colon -- confirm this is intentional.
title = title .. "‌: \"" .. subtitle .. "\""
end
-- No ID, since these should always accompany proper translation tables, and can't be trusted anyway (i.e. there's no use-case for links).
-- `title` is always non-nil here, so the `or "Translations"` fallback below
-- is dead but harmless (kept for symmetry with export.top).
return tostring(header) .. "\n" .. top(args, title, nil, html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(title or "Translations")
)
end
-- Implements {{trans-bottom}}.
function export.bottom(frame)
-- Check nothing is being passed as a parameter.
process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-bottom"])
-- Emit the closing tags that top() deliberately left open.
return "</table></div></div>"
end
-- Implements {{trans-see}} and part of {{trans-top-also}}.
-- Builds the NavHead "<gloss> — <i><see_text></i>" header followed by links
-- to the translation tables of the terms in args[2] (defaulting to the gloss
-- itself when no terms were given).
local function see(args, see_text)
local navhead = html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(args[1] .. " ")
:tag("span")
:css("font-weight", "normal")
:wikitext("— ")
:tag("i")
:wikitext(see_text)
:allDone()
local terms, id = args[2], args.id
if #terms == 0 then
terms[1] = args[1]
end
for i = 1, #terms do
-- Each link targets the "Translations-<id>" anchor set by add_id(), or the
-- generic "Translations" anchor when no id was supplied for this term.
local term_id = id[i] or id.default
local data = {
term = terms[i],
id = term_id and "Translations-" .. term_id or "Translations",
}
terms[i] = plain_link(data)
end
-- The separator includes U+200E LEFT-TO-RIGHT MARK after the comma, to keep
-- punctuation ordered correctly next to right-to-left terms.
return navhead:wikitext(concat(terms, ",‎ "))
end
-- Entry point for {{trans-see}}.
function export.see(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-see"])
	-- A pseudo-NavFrame holding only the "see ..." header row.
	local frame_div = html_create("div")
		:addClass("pseudo")
		:addClass("NavFrame")
		:node(see(args, "see "))
	-- Anchor under the explicit default id when given, else under the gloss.
	return tostring(add_id(frame_div, args.id.default or args[1]))
end
-- Entry point for {{trans-top-also}}.
function export.top_also(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-top-also"])
	-- Build the "see also ..." header first.
	local navhead = see(args, "see also ")
	local title = args[1]
	-- The anchor id is taken from the unstripped gloss.
	local id = args.id.default or title
	return top(args, remove_links(title), id, navhead)
end
-- Implements {{translation subpage}}.
function export.subpage(frame)
	-- No parameters are accepted; this call errors on any stray ones.
	process_params(frame:getParent().args, (parameters_data or get_parameters_data())["translation subpage"])
	if not is_translation_subpage() then
		error("This template should only be used on translation subpages, which have titles that end with '/translations'.")
	end
	-- "Translation subpages" category is handled by {{trans-top}}.
	local main_entry = full_link{
		lang = en or get_en(),
		term = canonical_pagename(),
	}
	return ("''This page contains translations for ''%s''. See the main entry for more information.''"):format(main_entry)
end
-- Implements {{t-needed}}: renders a "please add this translation" request
-- span and, unless |nocat= is set, an appropriate request category.
function export.needed(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["t-needed"])
	local lang, category = args[1], ""
	local span = html_create("span")
		:addClass("trreq")
		:attr("data-lang", lang:getCode())
		:tag("i")
		:wikitext("please add this translation if you can")
		:done()
	if not args.nocat then
		-- Renamed from `type`, which shadowed the Lua built-in of the same name.
		local request_type, sort = args[2], args.sort
		if request_type == "quote" then
			category = "Requests for translations of " .. lang:getCanonicalName() .. " quotations"
		elseif request_type == "usex" then
			category = "Requests for translations of " .. lang:getCanonicalName() .. " usage examples"
		else
			-- Generic requests: the category is formatted with English rather
			-- than the requested language.
			category = "Requests for translations into " .. lang:getCanonicalName()
			lang = en or get_en()
		end
		category = format_categories(category, lang, sort, not sort and canonical_pagename() or nil)
	end
	return tostring(span) .. category
end
-- Implements {{no equivalent translation}}.
function export.no_equivalent(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["no equivalent translation"])
	-- Build the italicized message piecewise; |noend= suppresses the tail.
	local parts = {"no equivalent term in ", args[1]:getCanonicalName()}
	if not args.noend then
		parts[#parts + 1] = ", but see"
	end
	return tostring(html_create("i"):wikitext(concat(parts)))
end
-- Implements {{no attested translation}}.
function export.no_attested(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["no attested translation"])
	local langname = args[1]:getCanonicalName()
	local text = "no [[WT:ATTEST|attested]] term in " .. langname
	-- |noend= suppresses both the ", but see" tail and the tracking category.
	if args.noend then
		return tostring(html_create("i"):wikitext(text))
	end
	text = text .. ", but see"
	local sort = args.sort
	local category = format_categories(langname .. " unattested translations", en or get_en(), sort, not sort and canonical_pagename() or nil)
	return tostring(html_create("i"):wikitext(text)) .. category
end
-- Implements {{not used}}.
function export.not_used(frame)
	local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["not used"])
	-- args[2] optionally overrides the default "not used" wording.
	local label = args[2] or "not used"
	local message = label .. " in " .. args[1]:getCanonicalName()
	return tostring(html_create("i"):wikitext(message))
end
return export
ddmeh30lm5qa67dbgom8ml4egmhwtjn
507806
507790
2026-04-14T08:58:53Z
Redmin
6857
507806
Scribunto
text/plain
local export = {}
local anchors_module = "Module:anchors"
local debug_track_module = "Module:debug/track"
local languages_module = "Module:languages"
local links_module = "Module:links"
local pages_module = "Module:pages"
local parameters_module = "Module:parameters"
local string_utilities_module = "Module:string utilities"
local templatestyles_module = "Module:TemplateStyles"
local utilities_module = "Module:utilities"
local wikimedia_languages_module = "Module:wikimedia languages"
local concat = table.concat
local html_create = mw.html.create
local insert = table.insert
local load_data = mw.loadData
local new_title = mw.title.new
local require = require
--[==[
Loaders for functions in other modules, which overwrite themselves with the target function when called. This ensures modules are only loaded when needed, retains the speed/convenience of locally-declared pre-loaded functions, and has no overhead after the first call, since the target functions are called directly in any subsequent calls.]==]
local function decode_uri(...)
decode_uri = require(string_utilities_module).decode_uri
return decode_uri(...)
end
local function format_categories(...)
format_categories = require(utilities_module).format_categories
return format_categories(...)
end
local function full_link(...)
full_link = require(links_module).full_link
return full_link(...)
end
local function get_link_page(...)
get_link_page = require(links_module).get_link_page
return get_link_page(...)
end
local function get_wikimedia_lang(...)
get_wikimedia_lang = require(wikimedia_languages_module).getByCode
return get_wikimedia_lang(...)
end
local function language_link(...)
language_link = require(links_module).language_link
return language_link(...)
end
local function normalize_anchor(...)
normalize_anchor = require(anchors_module).normalize_anchor
return normalize_anchor(...)
end
local function plain_link(...)
plain_link = require(links_module).plain_link
return plain_link(...)
end
local function process_params(...)
process_params = require(parameters_module).process
return process_params(...)
end
local function remove_links(...)
remove_links = require(links_module).remove_links
return remove_links(...)
end
local function split_on_slashes(...)
split_on_slashes = require(links_module).split_on_slashes
return split_on_slashes(...)
end
local function templatestyles(...)
templatestyles = require(templatestyles_module)
return templatestyles(...)
end
local function track(...)
track = require(debug_track_module)
return track(...)
end
--[==[
Loaders for objects, which load data (or some other object) into some variable, which can then be accessed as "foo or get_foo()", where the function get_foo sets the object to "foo" and then returns it. This ensures they are only loaded when needed, and avoids the need to check for the existence of the object each time, since once "foo" has been set, "get_foo" will not be called again.]==]
local en
local function get_en()
en, get_en = require(languages_module).getByCode("en"), nil
return en
end
local headword_data
local function get_headword_data()
headword_data, get_headword_data = load_data("Module:headword/data"), nil
return headword_data
end
local parameters_data
local function get_parameters_data()
parameters_data, get_parameters_data = load_data("Module:parameters/data"), nil
return parameters_data
end
local translations_data
local function get_translations_data()
translations_data, get_translations_data = load_data("Module:translations/data"), nil
return translations_data
end
local function is_translation_subpage(pagename)
if (headword_data or get_headword_data()).page.namespace ~= "" then
return false
elseif not pagename then
pagename = (headword_data or get_headword_data()).encoded_pagename
end
return pagename:match("./translations$") and true or false
end
local function canonical_pagename()
local pagename = (headword_data or get_headword_data()).encoded_pagename
return is_translation_subpage(pagename) and pagename:sub(1, -14) or pagename
end
local function interwiki(terminfo, term, lang, langcode)
-- No interwiki link if term is empty/missing
if not term or #term < 1 then
terminfo.interwiki = false
return
end
-- Percent-decode the term.
term = decode_uri(terminfo.term, "PATH")
-- Don't show an interwiki link if it's an invalid title.
if not new_title(term) then
terminfo.interwiki = false
return
end
local interwiki_langcode = (translations_data or get_translations_data()).interwiki_langs[langcode]
local wmlangs = interwiki_langcode and {get_wikimedia_lang(interwiki_langcode)} or lang:getWikimediaLanguages()
-- Don't show the interwiki link if the language is not recognised by Wikimedia.
if #wmlangs == 0 then
terminfo.interwiki = false
return
end
local sc = terminfo.sc
local target_page = get_link_page(term, lang, sc)
local split = split_on_slashes(target_page)
if not split[1] then
terminfo.interwiki = false
return
end
target_page = split[1]
local wmlangcode = wmlangs[1]:getCode()
local interwiki_link = language_link{
lang = lang,
sc = sc,
term = wmlangcode .. ":" .. target_page,
alt = "(" .. wmlangcode .. ")",
tr = "-"
}
terminfo.interwiki = tostring(html_create("span")
:addClass("tpos")
:wikitext(" " .. interwiki_link)
)
end
function export.show_terminfo(terminfo, check)
local lang = terminfo.lang
local langcode, langname = lang:getCode(), lang:getCanonicalName()
-- Translations must be for mainspace languages.
if not lang:hasType("regular") then
error("Translations must be for attested and approved main-namespace languages.")
else
local disallowed = (translations_data or get_translations_data()).disallowed
local err_msg = disallowed[langcode]
if err_msg then
error("Translations not allowed in " .. langname .. " (" .. langcode .. "). " .. langname .. " translations should " .. err_msg)
end
local fullcode = lang:getFullCode()
if fullcode ~= langcode then
err_msg = disallowed[fullcode]
if err_msg then
langname = lang:getFullName()
error("Translations not allowed in " .. langname .. " (" .. fullcode .. "). " .. langname .. " translations should " .. err_msg)
end
end
end
if langcode == "en" then
-- if terminfo.interwiki then
-- error("Interwiki translations not allowed for English; they should always link to a different Wiktionary")
-- end
local current_L2 = require(pages_module).get_current_L2()
if current_L2 ~= "Translingual" and mw.title.getCurrentTitle().nsText ~= "Wiktionary" then
if current_L2 then
error("English translations only allowed in Translingual section, not in " .. current_L2)
else
error("English translations only allowed in Translingual section, not outside of any L2")
end
end
end
local term = terminfo.term
-- Check if there is a term. Don't show the interwiki link if there is nothing to link to.
if not term then
-- Track entries that don't provide a term.
-- FIXME: This should be a category.
track("translations/no term")
track("translations/no term/" .. langcode)
end
if terminfo.interwiki then
interwiki(terminfo, term, lang, langcode)
end
langcode = lang:getFullCode()
if (translations_data or get_translations_data()).need_super[langcode] then
local tr = terminfo.tr
if tr ~= nil then
terminfo.tr = tr:gsub("%d[%d%*%-]*%f[^%d%*]", "<sup>%0</sup>")
end
end
terminfo.show_qualifiers = true
local link = full_link(terminfo, "translation")
local canonical_name = lang:getCanonicalName()
local full_name = lang:getFullName()
local categories = {"Terms with " .. canonical_name .. " translations"}
if canonical_name ~= full_name then
insert(categories, "Terms with " .. full_name .. " translations")
end
if check then
link = tostring(html_create("span")
:addClass("ttbc")
:tag("sup")
:addClass("ttbc")
:wikitext("(please [[WT:Translations#Translations to be checked|verify]])")
:done()
:wikitext(" " .. link)
)
insert(categories, "Requests for review of " .. langname .. " translations")
end
return link .. format_categories(categories, en or get_en(), nil, canonical_pagename())
end
-- Implements {{t}}, {{t+}}, {{t-check}} and {{t+check}}.
function export.show(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["translation"])
local check = frame.args.check
return export.show_terminfo({
lang = args[1],
sc = args.sc,
track_sc = true,
term = args[2],
alt = args.alt,
id = args.id,
genders = args[3],
tr = args.tr,
ts = args.ts,
lit = args.lit,
q = args.q,
qq = args.qq,
l = args.l,
ll = args.ll,
refs = args.ref,
interwiki = frame.args.interwiki,
}, check and check ~= "")
end
local function add_id(div, id)
return id and div:attr("id", normalize_anchor("Translations-" .. id)) or div
end
-- Implements {{trans-top}} and part of {{trans-top-also}}.
local function top(args, title, id, navhead)
local column_width = (args["column-width"] == "wide" or args["column-width"] == "narrow") and "-" .. args["column-width"] or ""
local div = html_create("div")
:addClass("NavFrame")
:node(navhead)
:tag("div")
:addClass("NavContent")
:tag("table")
:addClass("translations")
:attr("role", "presentation")
:attr("data-gloss", title or "")
:tag("tr")
:tag("td")
:addClass("translations-cell")
:addClass("multicolumn-list" .. column_width)
:attr("colspan", "3")
:allDone()
div = add_id(div, id)
local categories = {}
if not title then
insert(categories, "Translation table header lacks gloss")
end
local pagename = canonical_pagename()
if is_translation_subpage() then
insert(categories, "Translation subpages")
end
return (tostring(div):gsub("</td></tr></table></div></div>$", "")) ..
(#categories > 0 and format_categories(categories, en or get_en(), nil, pagename) or "") ..
-- Category to trigger [[MediaWiki:Gadget-TranslationAdder.js]]; we want this even on
-- user pages and such.
format_categories("Entries with translation boxes", nil, nil, nil, true) ..
templatestyles("Module:translations/styles.css")
end
-- Entry point for {{trans-top}}.
function export.top(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-top"])
local title = args[1]
local id = args.id or title
title = title and remove_links(title)
return top(args, title, id, html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(title or "Translations")
)
end
-- Entry point for {{checktrans-top}}.
function export.check_top(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["checktrans-top"])
local text = "\n:''The translations below need to be checked and inserted above into the appropriate translation tables. See instructions at " ..
frame:expandTemplate{
title = "section link",
args = {"Wiktionary:Entry layout#Translations"}
} ..
".''\n"
local header = html_create("div")
:addClass("checktrans")
:wikitext(text)
local subtitle = args[1]
local title = "Translations to be checked"
if subtitle then
title = title .. "‌: \"" .. subtitle .. "\""
end
-- No ID, since these should always accompany proper translation tables, and can't be trusted anyway (i.e. there's no use-case for links).
return tostring(header) .. "\n" .. top(args, title, nil, html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(title or "Translations")
)
end
-- Implements {{trans-bottom}}.
function export.bottom(frame)
-- Check nothing is being passed as a parameter.
process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-bottom"])
return "</table></div></div>"
end
-- Implements {{trans-see}} and part of {{trans-top-also}}.
local function see(args, see_text)
local navhead = html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(args[1] .. " ")
:tag("span")
:css("font-weight", "normal")
:wikitext("— ")
:tag("i")
:wikitext(see_text)
:allDone()
local terms, id = args[2], args.id
if #terms == 0 then
terms[1] = args[1]
end
for i = 1, #terms do
local term_id = id[i] or id.default
local data = {
term = terms[i],
id = term_id and "Translations-" .. term_id or "Translations",
}
terms[i] = plain_link(data)
end
return navhead:wikitext(concat(terms, ",‎ "))
end
-- Entry point for {{trans-see}}.
function export.see(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-see"])
local div = html_create("div")
:addClass("pseudo")
:addClass("NavFrame")
:node(see(args, "see "))
return tostring(add_id(div, args.id.default or args[1]))
end
-- Entry point for {{trans-top-also}}.
function export.top_also(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-top-also"])
local navhead = see(args, "see also ")
local title = args[1]
local id = args.id.default or title
title = remove_links(title)
return top(args, title, id, navhead)
end
-- Implements {{translation subpage}}.
function export.subpage(frame)
process_params(frame:getParent().args, (parameters_data or get_parameters_data())["translation subpage"])
if not is_translation_subpage() then
error("This template should only be used on translation subpages, which have titles that end with '/translations'.")
end
-- "Translation subpages" category is handled by {{trans-top}}.
return ("''This page contains translations for ''%s''. See the main entry for more information.''"):format(full_link{
lang = en or get_en(),
term = canonical_pagename(),
})
end
-- Implements {{t-needed}}.
function export.needed(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["t-needed"])
local lang, category = args[1], ""
local span = html_create("span")
:addClass("trreq")
:attr("data-lang", lang:getCode())
:tag("i")
:wikitext("please add this translation if you can")
:done()
if not args.nocat then
local type, sort = args[2], args.sort
if type == "quote" then
category = "Requests for translations of " .. lang:getCanonicalName() .. " quotations"
elseif type == "usex" then
category = "Requests for translations of " .. lang:getCanonicalName() .. " usage examples"
else
category = "Requests for translations into " .. lang:getCanonicalName()
lang = en or get_en()
end
category = format_categories(category, lang, sort, not sort and canonical_pagename() or nil)
end
return tostring(span) .. category
end
-- Implements {{no equivalent translation}}.
function export.no_equivalent(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["no equivalent translation"])
local text = "no equivalent term in " .. args[1]:getCanonicalName()
if not args.noend then
text = text .. ", but see"
end
return tostring(html_create("i"):wikitext(text))
end
-- Implements {{no attested translation}}.
function export.no_attested(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["no attested translation"])
local langname = args[1]:getCanonicalName()
local text = "no [[WT:ATTEST|attested]] term in " .. langname
local category = ""
if not args.noend then
text = text .. ", but see"
local sort = args.sort
category = format_categories(langname .. " unattested translations", en or get_en(), sort, not sort and canonical_pagename() or nil)
end
return tostring(html_create("i"):wikitext(text)) .. category
end
-- Implements {{not used}}.
function export.not_used(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["not used"])
return tostring(html_create("i"):wikitext((args[2] or "not used") .. " in " .. args[1]:getCanonicalName()))
end
return export
1dsteiwy6yk816aryinvj6zfosw460d
507807
507806
2026-04-14T08:59:55Z
Redmin
6857
507807
Scribunto
text/plain
local export = {}
local anchors_module = "Module:anchors"
local debug_track_module = "Module:debug/track"
local languages_module = "Module:languages"
local links_module = "Module:links"
local pages_module = "Module:pages"
local parameters_module = "Module:parameters"
local string_utilities_module = "Module:string utilities"
local templatestyles_module = "Module:TemplateStyles"
local utilities_module = "Module:utilities"
local wikimedia_languages_module = "Module:wikimedia languages"
local concat = table.concat
local html_create = mw.html.create
local insert = table.insert
local load_data = mw.loadData
local new_title = mw.title.new
local require = require
--[==[
Loaders for functions in other modules, which overwrite themselves with the target function when called. This ensures modules are only loaded when needed, retains the speed/convenience of locally-declared pre-loaded functions, and has no overhead after the first call, since the target functions are called directly in any subsequent calls.]==]
local function decode_uri(...)
decode_uri = require(string_utilities_module).decode_uri
return decode_uri(...)
end
local function format_categories(...)
format_categories = require(utilities_module).format_categories
return format_categories(...)
end
local function full_link(...)
full_link = require(links_module).full_link
return full_link(...)
end
local function get_link_page(...)
get_link_page = require(links_module).get_link_page
return get_link_page(...)
end
local function get_wikimedia_lang(...)
get_wikimedia_lang = require(wikimedia_languages_module).getByCode
return get_wikimedia_lang(...)
end
local function language_link(...)
language_link = require(links_module).language_link
return language_link(...)
end
local function normalize_anchor(...)
normalize_anchor = require(anchors_module).normalize_anchor
return normalize_anchor(...)
end
local function plain_link(...)
plain_link = require(links_module).plain_link
return plain_link(...)
end
local function process_params(...)
process_params = require(parameters_module).process
return process_params(...)
end
local function remove_links(...)
remove_links = require(links_module).remove_links
return remove_links(...)
end
local function split_on_slashes(...)
split_on_slashes = require(links_module).split_on_slashes
return split_on_slashes(...)
end
local function templatestyles(...)
templatestyles = require(templatestyles_module)
return templatestyles(...)
end
local function track(...)
track = require(debug_track_module)
return track(...)
end
--[==[
Loaders for objects, which load data (or some other object) into some variable, which can then be accessed as "foo or get_foo()", where the function get_foo sets the object to "foo" and then returns it. This ensures they are only loaded when needed, and avoids the need to check for the existence of the object each time, since once "foo" has been set, "get_foo" will not be called again.]==]
local en
local function get_en()
en, get_en = require(languages_module).getByCode("en"), nil
return en
end
local headword_data
local function get_headword_data()
headword_data, get_headword_data = load_data("Module:headword/data"), nil
return headword_data
end
local parameters_data
local function get_parameters_data()
parameters_data, get_parameters_data = load_data("Module:parameters/data"), nil
return parameters_data
end
local translations_data
local function get_translations_data()
translations_data, get_translations_data = load_data("Module:translations/data"), nil
return translations_data
end
local function is_translation_subpage(pagename)
if (headword_data or get_headword_data()).page.namespace ~= "" then
return false
elseif not pagename then
pagename = (headword_data or get_headword_data()).encoded_pagename
end
return pagename:match("./translations$") and true or false
end
local function canonical_pagename()
local pagename = (headword_data or get_headword_data()).encoded_pagename
return is_translation_subpage(pagename) and pagename:sub(1, -14) or pagename
end
local function interwiki(terminfo, term, lang, langcode)
-- No interwiki link if term is empty/missing
if not term or #term < 1 then
terminfo.interwiki = false
return
end
-- Percent-decode the term.
term = decode_uri(terminfo.term, "PATH")
-- Don't show an interwiki link if it's an invalid title.
if not new_title(term) then
terminfo.interwiki = false
return
end
local interwiki_langcode = (translations_data or get_translations_data()).interwiki_langs[langcode]
local wmlangs = interwiki_langcode and {get_wikimedia_lang(interwiki_langcode)} or lang:getWikimediaLanguages()
-- Don't show the interwiki link if the language is not recognised by Wikimedia.
if #wmlangs == 0 then
terminfo.interwiki = false
return
end
local sc = terminfo.sc
local target_page = get_link_page(term, lang, sc)
local split = split_on_slashes(target_page)
if not split[1] then
terminfo.interwiki = false
return
end
target_page = split[1]
local wmlangcode = wmlangs[1]:getCode()
local interwiki_link = language_link{
lang = lang,
sc = sc,
term = wmlangcode .. ":" .. target_page,
alt = "(" .. wmlangcode .. ")",
tr = "-"
}
terminfo.interwiki = tostring(html_create("span")
:addClass("tpos")
:wikitext(" " .. interwiki_link)
)
end
function export.show_terminfo(terminfo, check)
local lang = terminfo.lang
local langcode, langname = lang:getCode(), lang:getCanonicalName()
-- Translations must be for mainspace languages.
if not lang:hasType("regular") then
error("Translations must be for attested and approved main-namespace languages.")
else
local disallowed = (translations_data or get_translations_data()).disallowed
local err_msg = disallowed[langcode]
if err_msg then
error("Translations not allowed in " .. langname .. " (" .. langcode .. "). " .. langname .. " translations should " .. err_msg)
end
local fullcode = lang:getFullCode()
if fullcode ~= langcode then
err_msg = disallowed[fullcode]
if err_msg then
langname = lang:getFullName()
error("Translations not allowed in " .. langname .. " (" .. fullcode .. "). " .. langname .. " translations should " .. err_msg)
end
end
end
--if langcode == "en" then
-- if terminfo.interwiki then
-- error("Interwiki translations not allowed for English; they should always link to a different Wiktionary")
-- end
-- local current_L2 = require(pages_module).get_current_L2()
--if current_L2 ~= "Translingual" and mw.title.getCurrentTitle().nsText ~= "Wiktionary" then
-- if current_L2 then
-- error("English translations only allowed in Translingual section, not in " .. current_L2)
-- else
-- error("English translations only allowed in Translingual section, not outside of any L2")
-- end
-- end
-- end
local term = terminfo.term
-- Check if there is a term. Don't show the interwiki link if there is nothing to link to.
if not term then
-- Track entries that don't provide a term.
-- FIXME: This should be a category.
track("translations/no term")
track("translations/no term/" .. langcode)
end
if terminfo.interwiki then
interwiki(terminfo, term, lang, langcode)
end
langcode = lang:getFullCode()
if (translations_data or get_translations_data()).need_super[langcode] then
local tr = terminfo.tr
if tr ~= nil then
terminfo.tr = tr:gsub("%d[%d%*%-]*%f[^%d%*]", "<sup>%0</sup>")
end
end
terminfo.show_qualifiers = true
local link = full_link(terminfo, "translation")
local canonical_name = lang:getCanonicalName()
local full_name = lang:getFullName()
local categories = {"Terms with " .. canonical_name .. " translations"}
if canonical_name ~= full_name then
insert(categories, "Terms with " .. full_name .. " translations")
end
if check then
link = tostring(html_create("span")
:addClass("ttbc")
:tag("sup")
:addClass("ttbc")
:wikitext("(please [[WT:Translations#Translations to be checked|verify]])")
:done()
:wikitext(" " .. link)
)
insert(categories, "Requests for review of " .. langname .. " translations")
end
return link .. format_categories(categories, en or get_en(), nil, canonical_pagename())
end
-- Implements {{t}}, {{t+}}, {{t-check}} and {{t+check}}.
function export.show(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["translation"])
local check = frame.args.check
return export.show_terminfo({
lang = args[1],
sc = args.sc,
track_sc = true,
term = args[2],
alt = args.alt,
id = args.id,
genders = args[3],
tr = args.tr,
ts = args.ts,
lit = args.lit,
q = args.q,
qq = args.qq,
l = args.l,
ll = args.ll,
refs = args.ref,
interwiki = frame.args.interwiki,
}, check and check ~= "")
end
local function add_id(div, id)
return id and div:attr("id", normalize_anchor("Translations-" .. id)) or div
end
-- Implements {{trans-top}} and part of {{trans-top-also}}.
local function top(args, title, id, navhead)
local column_width = (args["column-width"] == "wide" or args["column-width"] == "narrow") and "-" .. args["column-width"] or ""
local div = html_create("div")
:addClass("NavFrame")
:node(navhead)
:tag("div")
:addClass("NavContent")
:tag("table")
:addClass("translations")
:attr("role", "presentation")
:attr("data-gloss", title or "")
:tag("tr")
:tag("td")
:addClass("translations-cell")
:addClass("multicolumn-list" .. column_width)
:attr("colspan", "3")
:allDone()
div = add_id(div, id)
local categories = {}
if not title then
insert(categories, "Translation table header lacks gloss")
end
local pagename = canonical_pagename()
if is_translation_subpage() then
insert(categories, "Translation subpages")
end
return (tostring(div):gsub("</td></tr></table></div></div>$", "")) ..
(#categories > 0 and format_categories(categories, en or get_en(), nil, pagename) or "") ..
-- Category to trigger [[MediaWiki:Gadget-TranslationAdder.js]]; we want this even on
-- user pages and such.
format_categories("Entries with translation boxes", nil, nil, nil, true) ..
templatestyles("Module:translations/styles.css")
end
-- Entry point for {{trans-top}}.
function export.top(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-top"])
local title = args[1]
local id = args.id or title
title = title and remove_links(title)
return top(args, title, id, html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(title or "Translations")
)
end
-- Entry point for {{checktrans-top}}.
function export.check_top(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["checktrans-top"])
local text = "\n:''The translations below need to be checked and inserted above into the appropriate translation tables. See instructions at " ..
frame:expandTemplate{
title = "section link",
args = {"Wiktionary:Entry layout#Translations"}
} ..
".''\n"
local header = html_create("div")
:addClass("checktrans")
:wikitext(text)
local subtitle = args[1]
local title = "Translations to be checked"
if subtitle then
title = title .. "‌: \"" .. subtitle .. "\""
end
-- No ID, since these should always accompany proper translation tables, and can't be trusted anyway (i.e. there's no use-case for links).
return tostring(header) .. "\n" .. top(args, title, nil, html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(title or "Translations")
)
end
-- Implements {{trans-bottom}}.
function export.bottom(frame)
-- Check nothing is being passed as a parameter.
process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-bottom"])
return "</table></div></div>"
end
-- Implements {{trans-see}} and part of {{trans-top-also}}.
local function see(args, see_text)
local navhead = html_create("div")
:addClass("NavHead")
:css("text-align", "left")
:wikitext(args[1] .. " ")
:tag("span")
:css("font-weight", "normal")
:wikitext("— ")
:tag("i")
:wikitext(see_text)
:allDone()
local terms, id = args[2], args.id
if #terms == 0 then
terms[1] = args[1]
end
for i = 1, #terms do
local term_id = id[i] or id.default
local data = {
term = terms[i],
id = term_id and "Translations-" .. term_id or "Translations",
}
terms[i] = plain_link(data)
end
return navhead:wikitext(concat(terms, ",‎ "))
end
-- Entry point for {{trans-see}}.
function export.see(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-see"])
local div = html_create("div")
:addClass("pseudo")
:addClass("NavFrame")
:node(see(args, "see "))
return tostring(add_id(div, args.id.default or args[1]))
end
-- Entry point for {{trans-top-also}}.
function export.top_also(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["trans-top-also"])
local navhead = see(args, "see also ")
local title = args[1]
local id = args.id.default or title
title = remove_links(title)
return top(args, title, id, navhead)
end
-- Implements {{translation subpage}}.
function export.subpage(frame)
process_params(frame:getParent().args, (parameters_data or get_parameters_data())["translation subpage"])
if not is_translation_subpage() then
error("This template should only be used on translation subpages, which have titles that end with '/translations'.")
end
-- "Translation subpages" category is handled by {{trans-top}}.
return ("''This page contains translations for ''%s''. See the main entry for more information.''"):format(full_link{
lang = en or get_en(),
term = canonical_pagename(),
})
end
-- Implements {{t-needed}}.
function export.needed(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["t-needed"])
local lang, category = args[1], ""
local span = html_create("span")
:addClass("trreq")
:attr("data-lang", lang:getCode())
:tag("i")
:wikitext("please add this translation if you can")
:done()
if not args.nocat then
local type, sort = args[2], args.sort
if type == "quote" then
category = "Requests for translations of " .. lang:getCanonicalName() .. " quotations"
elseif type == "usex" then
category = "Requests for translations of " .. lang:getCanonicalName() .. " usage examples"
else
category = "Requests for translations into " .. lang:getCanonicalName()
lang = en or get_en()
end
category = format_categories(category, lang, sort, not sort and canonical_pagename() or nil)
end
return tostring(span) .. category
end
-- Implements {{no equivalent translation}}.
function export.no_equivalent(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["no equivalent translation"])
local text = "no equivalent term in " .. args[1]:getCanonicalName()
if not args.noend then
text = text .. ", but see"
end
return tostring(html_create("i"):wikitext(text))
end
-- Implements {{no attested translation}}.
function export.no_attested(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["no attested translation"])
local langname = args[1]:getCanonicalName()
local text = "no [[WT:ATTEST|attested]] term in " .. langname
local category = ""
if not args.noend then
text = text .. ", but see"
local sort = args.sort
category = format_categories(langname .. " unattested translations", en or get_en(), sort, not sort and canonical_pagename() or nil)
end
return tostring(html_create("i"):wikitext(text)) .. category
end
-- Implements {{not used}}.
function export.not_used(frame)
local args = process_params(frame:getParent().args, (parameters_data or get_parameters_data())["not used"])
return tostring(html_create("i"):wikitext((args[2] or "not used") .. " in " .. args[1]:getCanonicalName()))
end
return export
s2m4wlzslo0un9qdnxyde6f305o5g8u
মডিউল:links/templates
828
7294
507797
323748
2026-04-14T07:05:20Z
Redmin
6857
[[en:Module:links/templates|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507797
Scribunto
text/plain
-- Prevent substitution.
if mw.isSubsting() then
return require("Module:unsubst")
end
local export = {}
local links_module = "Module:links"
local process_params = require("Module:parameters").process
local remove = table.remove
local upper = require("Module:string utilities").upper
--[=[
Modules used:
[[Module:links]]
[[Module:languages]]
[[Module:scripts]]
[[Module:parameters]]
[[Module:debug]]
]=]
do
local function get_args(frame)
-- `compat` is a compatibility mode for {{term}}.
-- If given a nonempty value, the function uses lang= to specify the
-- language, and all the positional parameters shift one number lower.
local iargs = frame.args
iargs.compat = iargs.compat and iargs.compat ~= ""
iargs.langname = iargs.langname and iargs.langname ~= ""
iargs.notself = iargs.notself and iargs.notself ~= ""
local alias_of_4 = {alias_of = 4}
local boolean = {type = "boolean"}
local params = {
[1] = {required = true, type = "language", default = "und"},
[2] = true,
[3] = true,
[4] = true,
g = {list = true, type = "genders", flatten = true},
gloss = alias_of_4,
id = true,
lit = true,
ng = true,
pos = true,
sc = {type = "script"},
t = alias_of_4,
tr = true,
ts = true,
q = {type = "qualifier"},
qq = {type = "qualifier"},
l = {type = "labels"},
ll = {type = "labels"},
ref = {type = "references"},
["accel-form"] = true,
["accel-translit"] = true,
["accel-lemma"] = true,
["accel-lemma-translit"] = true,
["accel-gender"] = true,
["accel-nostore"] = boolean,
}
if iargs.compat then
params.lang = {type = "language", default = "und"}
remove(params, 1)
alias_of_4.alias_of = 3
end
if iargs.langname then
params.w = boolean
end
return process_params(frame:getParent().args, params), iargs
end
-- Used in [[Template:l]] and [[Template:m]].
function export.l_term_t(frame)
local args, iargs = get_args(frame)
local compat = iargs.compat
local lang = args[compat and "lang" or 1]
-- Tracking for und.
if not compat and lang:getCode() == "und" then
require("Module:debug").track("link/und")
end
local term = args[(compat and 1 or 2)]
local alt = args[(compat and 2 or 3)]
term = term ~= "" and term or nil
if not term and not alt and iargs.demo then
term = iargs.demo
end
local langname = iargs.langname and (
args.w and lang:makeWikipediaLink() or
lang:getCanonicalName()
) or nil
if langname and term == "-" then
return langname
end
-- Forward the information to full_link
return (langname and langname .. " " or "") .. require(links_module).full_link(
{
lang = lang,
sc = args.sc,
track_sc = true,
term = term,
alt = alt,
gloss = args[4],
id = args.id,
tr = args.tr,
ts = args.ts,
genders = args.g,
pos = args.pos,
ng = args.ng,
lit = args.lit,
q = args.q,
qq = args.qq,
l = args.l,
ll = args.ll,
refs = args.ref,
show_qualifiers = true,
accel = args["accel-form"] and {
form = args["accel-form"],
translit = args["accel-translit"],
lemma = args["accel-lemma"],
lemma_translit = args["accel-lemma-translit"],
gender = args["accel-gender"],
nostore = args["accel-nostore"],
} or nil
},
iargs.face,
not iargs.notself
)
end
-- Used in [[Template:link-annotations]].
function export.l_annotations_t(frame)
local args, iargs = get_args(frame)
-- Forward the information to format_link_annotations
return require(links_module).format_link_annotations(
{
lang = args[1],
tr = { args.tr },
ts = { args.ts },
genders = args.g,
pos = args.pos,
ng = args.ng,
lit = args.lit
},
iargs.face
)
end
end
-- Used in [[Template:ll]].
do
local function get_args(frame)
return process_params(frame:getParent().args, {
[1] = {required = true, type = "language", default = "und"},
[2] = {allow_empty = true},
[3] = true,
id = true,
sc = {type = "script"},
})
end
function export.ll(frame)
local args = get_args(frame)
local lang = args[1]
local sc = args.sc
local term = args[2]
term = term ~= "" and term or nil
return require(links_module).language_link{
lang = lang,
sc = sc,
term = term,
alt = args[3],
id = args.id
} or "<small>[Term?]</small>" ..
require("Module:utilities").format_categories(
{lang:getFullName() .. " term requests"},
lang, "-", nil, nil, sc
)
end
end
function export.def_t(frame)
local args = process_params(frame:getParent().args, {
[1] = {required = true, default = ""},
})
local face = frame.args.face
local ret = require("Module:script utilities").tag_definition(require(links_module).embedded_language_links{
term = args[1],
lang = require("Module:languages").getByCode("en"),
sc = require("Module:scripts").getByCode("Latn")
}, face)
if face == "non-gloss" then
return ret
end
return '<span class="mention-gloss-paren">(</span>' .. ret .. '<span class="mention-gloss-paren">)</span>'
end
function export.linkify_t(frame)
local args = process_params(frame:getParent().args, {
[1] = {required = true, default = ""},
})
args[1] = mw.text.trim(args[1])
if args[1] == "" or args[1]:find("[[", nil, true) then
return args[1]
end
return "[[" .. args[1] .. "]]"
end
function export.cap_t(frame)
local args = process_params(frame:getParent().args, {
[1] = {required = true},
[2] = true,
lang = {type = "language", default = "en"},
})
local term = args[1]
return require(links_module).full_link{
lang = args.lang,
term = term,
alt = term:gsub("^.[\128-\191]*", upper) .. (args[2] or "")
}
end
-- Template entry point that delegates to section_link() in [[Module:links]],
-- passing the raw value of |1= through unchanged.
function export.section_link_t(frame)
	local parent_args = frame:getParent().args
	local args = process_params(parent_args, {
		[1] = {},
	})
	return require(links_module).section_link(args[1])
end
return export
49ywpqwyhc1meeszxumsw7m3fxck38v
মডিউল:parameters
828
12033
507792
457197
2026-04-14T06:49:14Z
Redmin
6857
[[en:Module:parameters|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507792
Scribunto
text/plain
--[==[TODO:
* Change certain flag names, as some are misnomers:
* Change `allow_holes` to `keep_holes`, because it's not the inverse of `disallow_holes`.
* Change `allow_empty` to `keep_empty`, as it causes them to be kept as "" instead of deleted.
* Sort out all the internal error calls. Manual error(format()) calls are used when certain parameters shouldn't be dumped, so find a way to avoid that.
]==]
local export = {}
local collation_module = "Module:collation"
local families_module = "Module:families"
local functions_module = "Module:fun"
local gender_and_number_utilities_module = "Module:gender and number utilities"
local labels_module = "Module:labels"
local languages_module = "Module:languages"
local math_module = "Module:math"
local pages_module = "Module:pages"
local parameters_finalize_set_module = "Module:parameters/finalizeSet"
local parameters_track_module = "Module:parameters/track"
local parse_utilities_module = "Module:parse utilities"
local references_module = "Module:references"
local scribunto_module = "Module:Scribunto"
local scripts_module = "Module:scripts"
local string_utilities_module = "Module:string utilities"
local table_module = "Module:table"
local wikimedia_languages_module = "Module:wikimedia languages"
local yesno_module = "Module:yesno"
local mw = mw
local mw_title = mw.title
local string = string
local table = table
local dump = mw.dumpObject
local find = string.find
local format = string.format
local gsub = string.gsub
local insert = table.insert
local ipairs = ipairs
local list_to_text = mw.text.listToText
local make_title = mw_title.makeTitle
local match = string.match
local max = math.max
local new_title = mw_title.new
local next = next
local pairs = pairs
local pcall = pcall
local require = require
local sub = string.sub
local tonumber = tonumber
local type = type
local unpack = unpack or table.unpack -- Lua 5.2 compatibility
local current_title_text, current_namespace, sets -- Defined when needed.
local namespaces = mw.site.namespaces
--[==[
Loaders for functions in other modules, which overwrite themselves with the target function when called. This ensures modules are only loaded when needed, retains the speed/convenience of locally-declared pre-loaded functions, and has no overhead after the first call, since the target functions are called directly in any subsequent calls.]==]
local function decode_entities(...)
decode_entities = require(string_utilities_module).decode_entities
return decode_entities(...)
end
local function extend(...)
extend = require(table_module).extend
return extend(...)
end
-- [[Module:parameters/finalizeSet]] returns a function directly, so no field lookup is needed.
local function finalize_set(...)
finalize_set = require(parameters_finalize_set_module)
return finalize_set(...)
end
local function get_family_by_code(...)
get_family_by_code = require(families_module).getByCode
return get_family_by_code(...)
end
local function get_family_by_name(...)
get_family_by_name = require(families_module).getByCanonicalName
return get_family_by_name(...)
end
local function get_language_by_code(...)
get_language_by_code = require(languages_module).getByCode
return get_language_by_code(...)
end
local function get_language_by_name(...)
get_language_by_name = require(languages_module).getByCanonicalName
return get_language_by_name(...)
end
local function get_script_by_code(...)
get_script_by_code = require(scripts_module).getByCode
return get_script_by_code(...)
end
local function get_script_by_name(...)
get_script_by_name = require(scripts_module).getByCanonicalName
return get_script_by_name(...)
end
local function get_wm_lang_by_code(...)
get_wm_lang_by_code = require(wikimedia_languages_module).getByCode
return get_wm_lang_by_code(...)
end
local function get_wm_lang_by_code_with_fallback(...)
get_wm_lang_by_code_with_fallback = require(wikimedia_languages_module).getByCodeWithFallback
return get_wm_lang_by_code_with_fallback(...)
end
local function gsplit(...)
gsplit = require(string_utilities_module).gsplit
return gsplit(...)
end
local function is_callable(...)
is_callable = require(functions_module).is_callable
return is_callable(...)
end
local function is_integer(...)
is_integer = require(math_module).is_integer
return is_integer(...)
end
local function is_internal_title(...)
is_internal_title = require(pages_module).is_internal_title
return is_internal_title(...)
end
local function is_positive_integer(...)
is_positive_integer = require(math_module).is_positive_integer
return is_positive_integer(...)
end
local function iterate_list(...)
iterate_list = require(table_module).iterateList
return iterate_list(...)
end
local function num_keys(...)
num_keys = require(table_module).numKeys
return num_keys(...)
end
local function parse_gender_and_number_spec(...)
parse_gender_and_number_spec = require(gender_and_number_utilities_module).parse_gender_and_number_spec
return parse_gender_and_number_spec(...)
end
local function parse_references(...)
parse_references = require(references_module).parse_references
return parse_references(...)
end
local function pattern_escape(...)
pattern_escape = require(string_utilities_module).pattern_escape
return pattern_escape(...)
end
local function php_trim(...)
php_trim = require(scribunto_module).php_trim
return php_trim(...)
end
local function scribunto_parameter_key(...)
scribunto_parameter_key = require(scribunto_module).scribunto_parameter_key
return scribunto_parameter_key(...)
end
-- Note: `sort` is the collating sort from [[Module:collation]], not table.sort.
local function sort(...)
sort = require(collation_module).sort
return sort(...)
end
local function sorted_pairs(...)
sorted_pairs = require(table_module).sortedPairs
return sorted_pairs(...)
end
local function split(...)
split = require(string_utilities_module).split
return split(...)
end
local function split_labels_on_comma(...)
split_labels_on_comma = require(labels_module).split_labels_on_comma
return split_labels_on_comma(...)
end
local function split_on_comma(...)
split_on_comma = require(parse_utilities_module).split_on_comma
return split_on_comma(...)
end
local function tonumber_extended(...)
tonumber_extended = require(math_module).tonumber_extended
return tonumber_extended(...)
end
-- [[Module:parameters/track]] and [[Module:yesno]] also return functions directly.
local function track(...)
track = require(parameters_track_module)
return track(...)
end
local function yesno(...)
yesno = require(yesno_module)
return yesno(...)
end
--[==[ intro:
This module is used to standardize template argument processing and checking. A typical workflow is as follows (based
on [[Module:translations]]):
{
...
local parent_args = frame:getParent().args
local params = {
[1] = {required = true, type = "language", default = "und"},
[2] = true,
[3] = {list = true},
["alt"] = true,
["id"] = true,
["sc"] = {type = "script"},
["tr"] = true,
["ts"] = true,
["lit"] = true,
}
local args = require("Module:parameters").process(parent_args, params)
-- Do further processing of the parsed arguments in `args`.
...
}
The `params` table should have the parameter names as the keys, and a (possibly empty) table of parameter tags as the
value. An empty table as the value merely states that the parameter exists, but should not receive any special
treatment; if desired, empty tables can be replaced with the value `true` as a performance optimization.
Possible parameter tags are listed below:
; {required = true}
: The parameter is required; an error is shown if it is not present. The template's page itself is an exception; no
error is shown there.
; {default =}
: Specifies a default input value for the parameter, if it is absent or empty. This will be processed as though it were
the input instead, so (for example) {default = "und"} with the type {"language"} will return a language object for
[[:Category:Undetermined language|Undetermined language]] if no language code is provided. When used on list
parameters, this specifies a default value for the first item in the list only. Note that it is not possible to
generate a default that depends on the value of other parameters. If used together with {required = true}, the default
applies only to template pages (see the following entry), as a side effect of the fact that "required" parameters
aren't actually required on template pages. This can be used to show an example of the template in action when the
template page is visited; however, it is preferred to use `template_default` for this purpose, for clarity.
; {template_default =}
: Specifies a default input value for absent or empty parameters only on the template demo invocation (the invocation of
the template that is displayed when the template page that implements the template is viewed). Template pages are
pages in template space that invoke (through {{tl|#invoke:}}) the module that implements the template and calls
[[Module:parameters]]. For example, the page [[Template:en-noun]] implements the {{tl|en-noun}} template, which in
turn invokes [[Module:en-headword]], and is a template page for [[Module:en-headword]]. When the template page
[[Template:en-noun]] is visited, the {{tl|#invoke:}} of the template's module is expanded as if the template were
called without arguments, and the output is inserted at that point into the processed page. This output serves as a
sort of demo of the template's functionality. `template_default` can be used to supply default values for use only in
this demo. Since the template page may also contain other invocations of the same template (e.g. on the template's
documentation page, which is typically transcluded into the template page itself), `template_default` does not apply
if there are any arguments passed to the template or if the template is invoked on any other page but its own template
page (which is checked by comparing the name of the invoking template to the current pagename). Both
`template_default` and `default` can be specified for the same parameter. If this is done, `template_default` applies
for the argumentless template invocation on the template page, and `default` in all other circumstances. As an example,
{{tl|cs-IPA}} uses the equivalent of {[1] = {default = "+", template_default = "příklad"}} to supply a default of
{"+"} for mainspace and documentation pages (which tells the module to use the value of the {{para|pagename}}
parameter, falling back to the actual pagename), but {"příklad"} (which means "example"), on [[Template:cs-IPA]].
; {alias_of =}
: Treat the parameter as an alias of another. When arguments are specified for this parameter, they will automatically
be renamed and stored under the alias name. This allows for parameters with multiple alternative names, while still
treating them as if they had only one name. The conversion-related properties of an aliased parameter (e.g. `type`,
`set`, `convert`, `sublist`) are taken from the aliasee, and the corresponding properties set on the alias itself
are ignored; but other properties on the alias are taken from the alias's spec and not from the aliasee's spec. This
means, for example, that if you create an alias of a list parameter, the alias must also specify the `list` property
or it is not a list. (In such a case, a value specified for the alias goes into the first item of the aliasee's list.
You cannot make a list alias of a non-list parameter; this causes an error to be thrown.) Similarly, if you specify
`separate_no_index` on an aliasee but not on the alias, uses of the unindexed aliasee parameter are stored into the
`.default` key, but uses of the unindexed alias are stored into the first numbered key of the aliasee's list.
Aliases cannot be required, as this prevents the other name or names of the parameter from being used. Parameters
that are aliases and required at the same time cause an error to be thrown.
; {allow_empty = true}
: If the argument is an empty string value, it is not converted to {nil}, but kept as-is. The use of `allow_empty` is
disallowed if a type has been specified, and causes an error to be thrown.
; {no_trim = true}
: Spacing characters such as spaces and newlines at the beginning and end of a positional parameter are not removed.
(MediaWiki itself automatically trims spaces and newlines at the edge of named parameters.) The use of `no_trim` is
disallowed if a type has been specified, and causes an error to be thrown.
; {type =}
: Specifies what value type to convert the argument into. The default is to leave it as a text string. Alternatives are:
:; {type = "boolean"}
:: The value is treated as a boolean value, either true or false. No value, the empty string, and the strings {"0"},
{"no"}, {"n"}, {"false"}, {"f"} and {"off"} are treated as {false}, all other values are considered {true}.
:; {type = "number"}
:: The value is converted into a number, and throws an error if the value is not parsable as a number. Input values may
be signed (`+` or `-`), and may contain decimal points and leading zeroes. If {allow_hex = true}, then hexadecimal
values in the form {"0x100"} may optionally be used instead, which otherwise have the same syntax restrictions
(including signs, decimal digits, and leading zeroes after {"0x"}). Hexadecimal inputs are not case-sensitive. Lua's
special number values (`inf` and `nan`) are not possible inputs.
:; {type = "range"}
:: The value is interpreted as a hyphen-separated range of two numbers (e.g. {"2-4"} is interpreted as the range from
{2} to {4}). A number input without a hyphen is interpreted as a range from that number to itself (e.g. the input {"1"} is interpreted as the range from {1} to {1}). Any optional flags which are available for numbers will also work for ranges.
:; {type = "language"}
:: The value is interpreted as a full or [[Wiktionary:Languages#Etymology-only languages|etymology-only language]] code
(or name, if {method = "name"}) and converted into the corresponding object (see [[Module:languages]]).
If the code or name is invalid, then an error is thrown. The additional setting {family = true} can be given to allow
[[Wiktionary:Language families|language family codes]] to be considered valid and the corresponding object returned.
Note that to distinguish an etymology-only language object from a full language object, use
{object:hasType("language", "etymology-only")}.
:; {type = "full language"}
:: The value is interpreted as a full language code (or name, if {method = "name"}) and converted into the corresponding
object (see [[Module:languages]]). If the code or name is invalid, then an error is thrown. Etymology-only languages
are not allowed. The additional setting {family = true} can be given to allow
[[Wiktionary:Language families|language family codes]] to be considered valid and the corresponding object returned.
:; {type = "Wikimedia language"}
:: The value is interpreted as a code and converted into a Wikimedia language object. If the code is invalid, then an
error is thrown. If {fallback = true} is specified, conventional language codes which are different from their
Wikimedia equivalent will also be accepted as a fallback.
:; {type = "family"}
:: The value is interpreted as a language family code (or name, if {method = "name"}) and converted into the
corresponding object (see [[Module:families]]). If the code or name is invalid, then an error is thrown.
:; {type = "script"}
:: The value is interpreted as a script code (or name, if {method = "name"}) and converted into the corresponding object
(see [[Module:scripts]]). If the code or name is invalid, then an error is thrown.
:; {type = "title"}
:: The value is interpreted as a page title and converted into the corresponding object (see the
[[mw:Extension:Scribunto/Lua_reference_manual#Title_library|Title library]]). If the page title is invalid, then an
error is thrown; by default, external titles (i.e. those on other wikis) are not treated as valid. Options are:
::; {namespace = n}
::: The default namespace, where {n} is a namespace number; this is treated as {0} (the mainspace) if not specified.
::; {allow_external = true}
::: External titles are treated as valid.
::; {prefix = "namespace override"} (default)
::: The default namespace prefix will be prefixed to the value unless it is already prefixed by a namespace prefix. For instance,
the input {"Foo"} with namespace {10} returns {"Template:Foo"}, {"Wiktionary:Foo"} returns {"Wiktionary:Foo"}, and
{"Template:Foo"} returns {"Template:Foo"}. Interwiki prefixes cannot act as overrides, however: the input {"fr:Foo"}
returns {"Template:fr:Foo"}.
::; {prefix = "force"}
::: The default namespace prefix will be prefixed unconditionally, even if the value already appears to be prefixed.
This is the way that {{tl|#invoke:}} works when calling modules from the module namespace ({828}): the input {"Foo"}
returns {"Module:Foo"}, {"Wiktionary:Foo"} returns {"Module:Wiktionary:Foo"}, and {"Module:Foo"} returns
{"Module:Module:Foo"}.
::; {prefix = "full override"}
::: The same as {prefix = "namespace override"}, except that interwiki prefixes can also act as overrides. For instance,
{"el:All topics"} with namespace {14} returns {"el:Category:All topics"}. Due to the limitations of MediaWiki, only
the first prefix in the value may act as an override, so the namespace cannot be overridden if the first prefix is
an interwiki prefix: e.g. {"el:Template:All topics"} with namespace {14} returns {"el:Category:Template:All topics"}.
:; {type = "parameter"}
:: The value is interpreted as the name of a parameter, and will be normalized using the method that Scribunto uses when
constructing a {frame.args} table of arguments. This means that integers will be converted to numbers, but all other
arguments will remain as strings (e.g. {"1"} will be normalized to {1}, but {"foo"} and {"1.5"} will remain
unchanged). Note that Scribunto also trims parameter names, following the same trimming method that this module
applies by default to all parameter types.
:: This type is useful when one set of input arguments is used to construct a {params} table for use in a subsequent
{export.process()} call with another set of input arguments; for instance, the set of valid parameters for a template
might be defined as {{tl|#invoke:[some module]|args=}} in the template, where {args} is a sublist of valid parameters
for the template.
:; {type = "qualifier"}
:: The value is interpreted as a qualifier and converted into the correct format for passing into `format_qualifiers()`
in [[Module:qualifier]] (which currently just means converting it to a one-item list).
:; {type = "labels"}
:: The value is interpreted as a comma-separated list of labels and converted into the correct format for passing into
`show_labels()` in [[Module:labels]] (which is currently a list of strings). Splitting is done on commas not followed
by whitespace, except that commas inside of double angle brackets do not count even if not followed by whitespace.
This type should be used for normal labels (typically specified using {{para|l}} or {{para|ll}}) and accent
qualifiers (typically specified using {{para|a}} and {{para|aa}}).
:; {type = "references"}
:: The value is interpreted as one or more references, in the format prescribed by `parse_references()` in
[[Module:references]], and converted into a list of objects of the form accepted by `format_references()` in the same
module. If a syntax error is found in the reference format, an error is thrown.
:; {type = "genders"}
:: The value is interpreted as one or more comma-separated gender/number specs, in the format prescribed by
[[Module:gender and number]]. Inline modifiers (`<q:...>`, `<qq:...>`, `<l:...>`, `<ll:...>` or `<ref:...>`) may be
attached to a gender/number spec.
:; {type = "form of tags"}
:: The value is interpreted as an ampersand-separated list of grammar tags and converted into the correct format
for passing as `tags` into `tagged_inflections()` in [[Module:form of]] (which is currently a list of strings).
Splitting is always done by ampersands. This type should be used for inflection qualifiers that act as
grammar tags (typically specified using {{para|infl}}).
:; {type = function(val) ... end}
:: `type` may be set to a function (or callable table), which must take the argument value as its sole argument, and must
output one of the other recognized types. This is particularly useful for lists (see below), where certain values need
to be interpreted differently to others.
; {list =}
: Treat the parameter as a list of values, each having its own parameter name, rather than a single value. The
parameters will have a number at the end, except optionally for the first (but see also {require_index = true}). For
example, {list = true} on a parameter named "head" will include the parameters {{para|head}} (or {{para|head1}}),
{{para|head2}}, {{para|head3}} and so on. If the parameter name is a number, another number doesn't get appended, but
the counting simply continues, e.g. for parameter {3} the sequence is {{para|3}}, {{para|4}}, {{para|5}} etc. List
parameters are returned as numbered lists, so for a template that is given the parameters `|head=a|head2=b|head3=c`,
the processed value of the parameter {"head"} will be { { "a", "b", "c" }}}.
: The value for {list =} can also be a string. This tells the module that parameters other than the first should have a
different name, which is useful when the first parameter in a list is a number, but the remainder is named. An example
would be for genders: {list = "g"} on a parameter named {1} would have parameters {{para|1}}, {{para|g2}}, {{para|g3}}
etc.
: If the number is not located at the end, it can be specified by putting {"\1"} at the number position. For example,
parameters {{para|f1accel}}, {{para|f2accel}}, ... can be captured by using the parameter name {"f\1accel"}, as is
done in [[Module:headword/templates]].
; {set =}
: Require that the value of the parameter be one of the specified values (or omitted, if {required = true} isn't given).
Two formats are allowed; either a list of possible values can be supplied, or a table can be supplied where the keys
are allowed values and the values are either `true` or a string naming a value found elsewhere in the table as a key.
In the latter case, the key is an alias and the value is the canonical value, and if the user uses the alias, it will
automatically be mapped to the canonical value. In such a case, the canonical value cannot itself be an alias. The use
of `set` is disallowed if {type = "boolean"} and causes an error to be thrown.
; {sublist =}
: The value of the parameter is a delimiter-separated list of individual raw values. The resulting field in `args` will
be a Lua list (i.e. a table with numeric indices) of the converted values. If {sublist = true} is given, the values
will be split on commas (possibly with whitespace on one or both sides of the comma, which is ignored). If
{sublist = "comma without whitespace"} is given, the values will be split on commas which are not followed by whitespace,
and which aren't preceded by an escaping backslash. Otherwise, the value of `sublist` should be either a Lua pattern
specifying the delimiter(s) to split on or a function (or callable table) to do the splitting, which is passed two values
(the value to split and a function to signal an error) and should return a list of the split values.
; {convert =}
: If given, this specifies a function (or callable table) to convert the raw parameter value into the Lua object used
during further processing. The function is passed two arguments, the raw parameter value itself and a function used to
signal an error during parsing or conversion, and should return one value, the converted parameter. The error-signaling
function contains the name and raw value of the parameter embedded into the message it generates, so these do not need to
be specified in the message passed into it. If `type` is specified in conjunction with `convert`, the processing by
`type` happens first. If `sublist` is given in conjunction with `convert`, the raw parameter value will be split
appropriately and `convert` called on each resulting item.
; {allow_hex = true}
: When used in conjunction with {type = "number"}, allows hexadecimal numbers as inputs, in the format {"0x100"} (which is
not case-sensitive).
; {family = true}
: When used in conjunction with {type = "language"}, allows [[Wiktionary:Language families|language family codes]] to be
returned. To check if a given object refers to a language family, use {object:hasType("family")}.
; {method = "name"}
: When used in conjunction with {type = "language"}, {type = "family"} or {type = "script"}, checks for and parses a
language, family or script name instead of a code.
; {allow_holes = true}
: This is used in conjunction with list-type parameters. By default, the values are tightly packed in the resulting
list. This means that if, for example, an entry specified `head=a|head3=c` but not {{para|head2}}, the returned list
will be { {"a", "c"}}}, with the values stored at the indices {1} and {2}, not {1} and {3}. If it is desirable to keep
the numbering intact, for example if the numbers of several list parameters correlate with each other (like those of
{{tl|affix}}), then this tag should be specified.
: If {allow_holes = true} is given, there may be {nil} values in between two real values, which makes many of Lua's
table processing functions no longer work, like {#} or {ipairs()}. To remedy this, the resulting table will contain an
additional named value, `maxindex`, which tells you the highest numeric index that is present in the table. In the
example above, the resulting table will now be { { "a", nil, "c", maxindex = 3}}}. That way, you can iterate over the
values from {1} to `maxindex`, while skipping {nil} values in between.
; {disallow_holes = true}
: This is used in conjunction with list-type parameters. As mentioned above, normally if there is a hole in the source
arguments, e.g. `head=a|head3=c` but not {{para|head2}}, it will be removed in the returned list. If
{disallow_holes = true} is specified, however, an error is thrown in such a case. This should be used whenever there
are multiple list-type parameters that need to line up (e.g. both {{para|head}} and {{para|tr}} are available and
{{para|head3}} lines up with {{para|tr3}}), unless {allow_holes = true} is given and you are prepared to handle the
holes in the returned lists.
; {disallow_missing = true}
: This is similar to {disallow_holes = true}, but an error will not be thrown if an argument is blank, rather than
completely missing. This may be used to tolerate intermediate blank numerical parameters, which sometimes occur in list
templates. For instance, `head=a|head2=|head3=c` will not throw an error, but `head=a|head3=c` will.
; {require_index = true}
: This is used in conjunction with list-type parameters. By default, the first parameter can have its index omitted.
For example, a list parameter named `head` can have its first parameter specified as either {{para|head}} or
{{para|head1}}. If {require_index = true} is specified, however, only {{para|head1}} is recognized, and {{para|head}}
will be treated as an unknown parameter. {{tl|affixusex}} (and variants {{tl|suffixusex}}, {{tl|prefixusex}}) use
this, for example, on all list parameters.
; {separate_no_index = true}
: This is used to distinguish between {{para|head}} and {{para|head1}} as different parameters. For example, in
{{tl|affixusex}}, to distinguish between {{para|sc}} (a script code for all elements in the usex's language) and
{{para|sc1}} (the script code of the first element, used when the first element is prefixed with a language code to
indicate that it is in a different language). When this is used, the resulting table will contain an additional named
value, `default`, which contains the value for the indexless argument.
; {flatten = true}
: This is used in conjunction with list-type parameters when `sublist` or a list-generating type such as {"labels"} or
{"genders"} is also specified, and causes the resulting list to be flattened. Not currently compatible with
{allow_holes = true}.
; {replaced_by =}
: Specifies that the parameter is no longer valid, and has been replaced by some other mechanism. If the value of
`replaced_by` is a string, it is the name of the new parameter to use instead. Use the `reason` tag to specify the
reason why this change has been made, e.g.
{reason = "for consistency with the corresponding parameter in other Romance-language headword templates"}. If the
value of `replaced_by` is {false}, there is no replacement parameter. In this case, `instead` should be supplied
with a description of what to do instead, e.g.
{instead = "use an inline modifier on |2= such as <q:...>, <qq:...>, <l:...> or <ll:...>"}. You can also supply a
justification in `reason` if you feel it is appropriate or necessary to do so.
; {reason =}
: When used in conjunction with `replaced_by`, specifies the reason for the parameter replacement.
; {instead =}
: When used in conjunction with {replaced_by = false}, specifies what to do instead of using the removed parameter.
; {demo = true}
: This is used as a way to ensure that the parameter is only enabled on the template's own page (and its documentation
page), and in the User: namespace; otherwise, it will be treated as an unknown parameter. This should only be used if
special settings are required to showcase a template in its documentation (e.g. adjusting the pagename or disabling
categorization). In most cases, it should be possible to do this without using demo parameters, but they may be
required if a template/documentation page also contains real uses of the same template as well (e.g. {{tl|shortcut}}),
as a way to distinguish them.
; {deprecated = true}
: This is for tracking the use of deprecated parameters, including any aliases that are being brought out of use. See
[[Wiktionary:Tracking]] for more information.
]==]
-- Returns true if the current page is a template or module containing the current {{#invoke}}.
-- If the include_documentation argument is given, also returns true if the current page is either page's documentation page.
-- The result is computed once and cached in the `own_page`/`own_page_or_documentation` upvalues.
local own_page, own_page_or_documentation
local function is_own_page(include_documentation)
if own_page == nil then
-- Lazily resolve the current title; `current_title_text`/`current_namespace` are
-- module-level upvalues shared with other code in this module.
if current_namespace == nil then
local current_title = mw_title.getCurrentTitle()
current_title_text, current_namespace = current_title.prefixedText, current_title.namespace
end
-- In the Module namespace (828), compare against the current frame (the {{#invoke}}
-- itself); in the Template namespace (10), compare against the parent frame (the
-- template wrapping the {{#invoke}}). In any other namespace there is no frame to
-- compare, so the answer is false.
local frame = current_namespace == 828 and mw.getCurrentFrame() or
current_namespace == 10 and mw.getCurrentFrame():getParent()
if frame then
local frame_title_text = frame:getTitle()
own_page = current_title_text == frame_title_text
own_page_or_documentation = own_page or current_title_text == frame_title_text .. "/documentation"
else
own_page, own_page_or_documentation = false, false
end
end
return include_documentation and own_page_or_documentation or own_page
end
-------------------------------------- Some helper functions -----------------------------
-- Join the elements of `list` into a text string, placing `conjunction`
-- before the final element. When `dump_vals` is set, every element is first
-- passed through mw.dumpObject() (WARNING: this mutates `list` in place).
-- Similar to serialCommaJoin() in [[Module:table]] with the `dontTag = true`
-- option, but backed by mw.text.listToText().
local function concat_list(list, conjunction, dump_vals)
	if not dump_vals then
		return list_to_text(list, nil, conjunction)
	end
	for key, value in pairs(list) do
		list[key] = dump(value)
	end
	return list_to_text(list, nil, conjunction)
end
-- Helper for generating error-signaling functions in the presence of raw
-- value conversion: when the processed value `processed` differs from the raw
-- input `rawval`, append it to the message `msg`; otherwise return `msg`
-- unchanged. Non-scalar processed values are rendered with mw.dumpObject().
local function msg_with_processed(msg, rawval, processed)
	if rawval ~= processed then
		local ptype = type(processed)
		local shown
		if ptype == "string" or ptype == "number" then
			shown = processed
		else
			shown = dump(processed)
		end
		return format("%s (processed value %s)", msg, shown)
	end
	return msg
end
-- Split a form-of tag string on ampersands (&) into a list of individual tags,
-- for use with {type = "form of tags"}.
local function split_tags_on_ampersand(tags)
return split(tags, "&")
end
-------------------------------------- Error handling -----------------------------
-- Raise a formatted error, dumping every extra argument with mw.dumpObject()
-- before substituting it into `fmt`. A table-wrapped format string is a hacky
-- signal from internal_process_error() that stack frames should not be
-- omitted; otherwise the error is reported two frames up (level 3).
local function process_error(fmt, ...)
	local vals = {...}
	for i, v in ipairs(vals) do
		vals[i] = dump(v)
	end
	if type(fmt) ~= "table" then
		return error(format(fmt, unpack(vals)), 3)
	end
	return error(format(fmt[1], unpack(vals)))
end
-- Like process_error(), but prefixes the message to indicate a bug in the
-- `params` table supplied by the calling module (as opposed to bad user input).
local function internal_process_error(fmt, ...)
process_error({"Internal error in `params` table: " .. fmt}, ...)
end
-- Check that a parameter or argument is in the form Scribunto normalizes input argument keys into
-- (e.g. 1 not "1", "foo" not " foo "). Otherwise, it won't be possible to normalize inputs in the
-- expected way. Unless is_argument is set, also check that the name only contains one placeholder
-- ("\1") at most, and that strings don't resolve to numeric keys once the placeholder has been
-- substituted. `desc` (optionally followed by `extra_name`) identifies the offending value in error
-- messages. Returns nothing on success; throws on failure.
local function validate_name(name, desc, extra_name, is_argument)
local normalized = scribunto_parameter_key(name)
if name and name == normalized then
-- Name is already in normalized form. Placeholder checks only apply to string
-- parameter names (not raw arguments, not numbers).
if is_argument or type(name) ~= "string" then
return
end
-- Plain find: "\1" is the placeholder used by list parameter names like "f\1accel".
local placeholder = find(name, "\1", nil, true)
if not placeholder then
return
elseif find(name, "\1", placeholder + 1, true) then
error(format(
"Internal error: expected %s to only contain one placeholder, but saw %s",
extra_name and (desc .. dump(extra_name)) or desc, dump(name)
))
end
-- Substituting a digit for the placeholder must not turn the name into a numeric
-- key, as that would collide with positional parameters.
local first_name = gsub(name, "\1", "1")
normalized = scribunto_parameter_key(first_name)
if first_name == normalized then
return
end
error(format(
"Internal error: %s cannot resolve to numeric parameters once any placeholder has been substituted, but %s resolves to %s",
extra_name and (desc .. dump(extra_name)) or desc, dump(name), dump(normalized)
))
elseif normalized == nil then
-- A nil normalization means the name is not a string or number at all.
error(format(
"Internal error: expected %s to be of type string or number, but saw %s",
extra_name and (desc .. dump(extra_name)) or desc, type(name)
))
end
-- The name is a string/number but not in canonical form (e.g. "1" instead of 1).
error(format(
"Internal error: expected %s to be Scribunto-compatible: %s (a %s) should be %s (a %s)",
extra_name and (desc .. dump(extra_name)) or desc, dump(name), type(name), dump(normalized), type(normalized)
))
end
-- Validate the spec table `param` of a parameter `name` that is an alias of `alias_of` (whose own
-- spec is `main_param`). Options that only make sense on the main parameter (required/default/
-- hole-handling flags) are invalid on an alias; currently such cases are only tracked, not fatal.
-- On first call, the outer function builds the `invalid` lookup table once and then replaces
-- itself with the real validator (the same lazy-initialization idiom used by this module's
-- loader functions).
local function validate_alias_options(...)
local invalid = {
required = true,
default = true,
template_default = true,
allow_holes = true,
disallow_holes = true,
disallow_missing = true,
}
function validate_alias_options(param, name, main_param, alias_of)
for k in pairs(param) do
if invalid[k] then
-- Tracked only for now; the commented-out error below may be enabled later.
track("bad alias option")
-- internal_process_error(
-- "parameter %s cannot have the option %s, as it is an alias of parameter %s.",
-- name, option, alias_of
-- )
end
end
-- Soon, aliases will inherit options from the main parameter via __index. Track cases where this would happen.
if main_param ~= true then
for k in pairs(main_param) do
if param[k] == nil and not invalid[k] then
if k == "list" then -- these need to be changed to list = false to retain current behaviour
track("mismatched list alias option")
elseif not (k == "type" or k == "set" or k == "sublist") then -- rarely specified on aliases, as they're effectively inherited already
track("mismatched alias option")
end
end
end
end
end
-- First call: run the freshly defined validator on the original arguments.
validate_alias_options(...)
end
-- TODO: give ranges instead of long lists, if possible.
--[==[ func: export.params_list_error(params, msg)
Given a key-value table of raw parameters `params`, display an error message about all the parameters seen in the table.
The parameter names are displayed in sorted order. `msg` should be e.g. {"required"} or {"not used by this template"}.
This is used internally to display error messages about required or invalid parameters, and can be used for the same
purpose by code that processes its own parameters (e.g. if the `return_unknown` flag is specified to `process`).
]==]
-- Throw an error naming every parameter in `params` (sorted), followed by
-- `msg` (e.g. "required" or "not used by this template").
local function params_list_error(params, msg)
	-- Collect the parameter names in sorted order.
	local names = {}
	for name in sorted_pairs(params) do
		insert(names, name)
	end
	-- Singular vs. plural phrasing, with the names joined by "and".
	local template = #names == 1 and " %s is" or "s %s are"
	local middle = format(template, concat_list(names, " and ", true))
	-- Level 3 reports the error at the caller of our caller.
	error(format("Parameter%s %s.", middle, msg), 3)
end
export.params_list_error = params_list_error
-- Helper function for use with convert_val_error(). Format a list of possible choices using `concat_list` and
-- conjunction "or", displaying "either " before the choices if there's more than one.
-- Join the possible choices with "or", prefixing "either " when there is
-- more than one option.
local function format_choice_list(valid)
	local joined = concat_list(valid, " or ")
	if #valid > 1 then
		return "either " .. joined
	end
	return joined
end
-- Signal an error for a value `val` that is not of the right type `valid` (which is either a string specifying a type, or
-- a list of possible values, in the case where `set` was used). `name` is the name of the parameter and can be a
-- function to signal an error (which is assumed to automatically display the parameter's name and value). `seetext` is
-- an optional additional explanatory link to display (e.g. [[WT:LOL]], the list of possible languages and codes).
-- Throw an error for a value `val` that failed conversion. `valid` is either
-- a type description (string) or, when `set` was used, a list of permitted
-- values. `name` may be the parameter name, or an error-signaling function
-- that is assumed to display the name and value itself. `seetext` is an
-- optional explanatory link (e.g. [[WT:LOL]]).
local function convert_val_error(val, name, valid, seetext)
	local valid_is_list = type(valid) == "table"
	if is_callable(name) then
		local desc = valid_is_list and ("choice, must be " .. format_choice_list(valid)) or valid
		local see = seetext and ("; see " .. seetext) or ""
		name(format("Invalid %s; the value %s is not valid%s", desc, val, see))
	else
		local desc
		if valid_is_list then
			desc = format_choice_list(valid)
		else
			desc = "a valid " .. valid
		end
		local see = seetext and (" See " .. seetext .. ".") or ""
		error(format("Parameter %s must be %s; the value %s is not valid.%s", dump(name), desc, dump(val), see))
	end
end
-- Generate the appropriate error-signaling function given parameter value `val` and name `name`. If `name` is already
-- a function, it is just returned; otherwise a function is generated and returned that displays the passed-in message
-- along with the parameter's name and value.
local function make_parse_err(val, name)
	-- An already-callable `name` is its own error signaler.
	if is_callable(name) then
		return name
	end
	-- Otherwise build a closure that appends the parameter name and value.
	local function parse_err(msg)
		error(format("%s: parameter %s=%s", msg, name, val))
	end
	return parse_err
end
-------------------------------------- Value conversion -----------------------------
-- For a list parameter `name` and corresponding value `list_name` of the `list` field (which should have the same value
-- as `name` if `list = true` was given), generate a pattern to match parameters of the list and store the pattern as a
-- key in `patterns`, with corresponding value set to `name`. For example, if `list_name` is "tr", the pattern will
-- match "tr" as well as "tr1", "tr2", ..., "tr10", "tr11", etc. If the `list_name` contains a \1 in it, the numeric
-- portion goes in place of the \1. For example, if `list_name` is "f\1accel", the pattern will match "faccel",
-- "f1accel", "f2accel", etc. Any \1 in `name` is removed before storing into `patterns`.
local function save_pattern(name, list_name, patterns)
-- Strip any placeholder from `name`: the value stored in `patterns` is the
-- canonical parameter name without the position marker.
name = type(name) == "string" and gsub(name, "\1", "") or name
if find(list_name, "\1", nil, true) then
-- Placeholder present: the item number replaces \1 (e.g. "f\1accel"
-- matches "f1accel", "f2accel", ...). The replacement uses %%d because %
-- must be escaped in gsub replacement strings.
patterns["^" .. gsub(pattern_escape(list_name), "\1", "([1-9]%%d*)") .. "$"] = name
else
-- No placeholder: the item number is appended (e.g. "tr1", "tr2", ...).
patterns["^" .. pattern_escape(list_name) .. "([1-9]%d*)$"] = name
-- Append \1 so validate_name() knows where the number will be inserted.
list_name = list_name .. "\1"
end
validate_name(list_name, "the list field of parameter ", name)
return patterns
end
-- A helper function for use with `sublist`. It is an iterator function for use in a for-loop that returns split
-- elements of `val` using `sublist` (a Lua split pattern; boolean `true` to split on commas optionally surrounded by
-- whitespace; "comma without whitespace" to split only on commas not followed by whitespace which have not been escaped
-- by a backslash; or a function to do the splitting, which is passed two values, the value to split and a function to
-- signal an error, and should return a list of the split elements). `name` is the parameter name or error-signaling
-- function passed into convert_val().
local function split_sublist(val, name, sublist)
-- `true` means: split on commas, absorbing surrounding whitespace.
if sublist == true then
return gsplit(val, "%s*,%s*")
-- Split an argument on comma, but not comma followed by whitespace.
elseif sublist == "comma without whitespace" then
-- If difficult cases, use split_on_comma.
if find(val, "\\", nil, true) or match(val, ",%s") then
return iterate_list(split_on_comma(val))
end
-- Otherwise, use gsplit.
return gsplit(val, ",")
-- Any other string is used directly as a Lua split pattern.
elseif type(sublist) == "string" then
return gsplit(val, sublist)
elseif not is_callable(sublist) then
error(format('Internal error: expected `sublist` to be of type "string" or "function" or boolean `true`, but saw %s', dump(sublist)))
end
-- Custom splitter function: it returns a list, which we wrap in an iterator.
return iterate_list(sublist(val, make_parse_err(val, name)))
end
-- For parameter named `name` with value `val` and param spec `param`, if the `set` field is specified, verify that the
-- value is one of the one specified in `set`, and throw an error otherwise. `name` is taken directly from the
-- corresponding parameter passed into convert_val() and may be a function to signal an error. Optional `param_type` is
-- a string specifying the conversion type of `val` and is used for special-casing: If `param_type` is "boolean", an
-- internal error is thrown (since `set` cannot be used in conjunction with booleans) and if `param_type` is "number",
-- no checking happens because in this case `set` contains numbers and is checked inside the number conversion function
-- itself, after converting `val` to a number. Return the canonical value of `val` (which may be different from `val`
-- if an alias map is given).
local function check_set(val, name, param, param_type)
if param_type == "boolean" then
error(format('Internal error: cannot use `set` with `type = "%s"`', param_type))
-- Needs to be special cased because the check happens after conversion to numbers.
elseif param_type == "number" then
return val
end
-- `sets` caches the finalized lookup map for each raw `set` table.
-- NOTE(review): `sets` is assumed to be a file-level local declared earlier
-- in this module (not a global) — confirm its declaration.
local set, map = param.set
if sets == nil then
map = finalize_set(set, name)
sets = {[set] = map}
else
map = sets[set]
if map == nil then
map = finalize_set(set, name)
sets[set] = map
end
end
-- In the finalized map, `true` marks a canonical value; any other non-nil
-- value is the canonical form of an alias.
local newval = map[val]
if newval == true then
return val
elseif newval ~= nil then
return newval
end
-- Not in the set: build a sorted list of permitted values for the error.
local list = {}
for k, v in sorted_pairs(map) do
if v == true then
insert(list, dump(k))
else
insert(list, ("%s (alias of %s)"):format(dump(k), dump(v)))
end
end
-- If the parameter is not required then put "or empty" at the end of the list, to avoid implying the parameter is actually required.
if not param.required then
insert(list, "empty")
end
convert_val_error(val, name, list)
end
-- Convert `val` to a language object. `param.method` selects lookup by
-- "code" (the default) or "name"; anything else is an internal error. When
-- `allow_etym` is set, etymology-only languages are also accepted, and when
-- `param.family` is set, families are too. On failure, an error lists the
-- accepted kinds of input with links to the relevant lists.
local function convert_language(val, name, param, allow_etym)
	local method = param.method
	local func
	if method == "name" then
		func = get_language_by_name
	elseif method == nil or method == "code" then
		func, method = get_language_by_code, "code"
	else
		error(format('Internal error: expected `method` for type `language` to be "code", "name" or undefined, but saw %s', dump(method)))
	end
	local lang = func(val, nil, allow_etym, param.family)
	if lang then
		return lang
	end
	-- Lookup failed: describe which kinds of objects were acceptable and
	-- where their lists can be found.
	local kinds, links = {"language"}, {"[[WT:LOL]]"}
	if allow_etym then
		insert(kinds, "etymology language")
		insert(links, "[[WT:LOL/E]]")
	end
	if param.family then
		insert(kinds, "family")
		insert(links, "[[WT:LOF]]")
	end
	local suffix = method == "name" and "name" or "code"
	convert_val_error(val, name, concat_list(kinds, " or ") .. " " .. suffix, concat_list(links, " and "))
end
-- Convert string `val` to a finite real number, returning nil on failure.
-- If `allow_hex` is set, 0x-prefixed hexadecimal inputs are also accepted.
local function convert_number(val, allow_hex)
-- Call tonumber_extended with the `finite_real` flag, which filters out ±infinity and NaN.
-- By default, specify base 10, which prevents 0x hex inputs from being converted.
-- If `allow_hex` is set, then don't give a base, which means 0x hex inputs will work.
local num = tonumber_extended(val, not allow_hex and 10 or nil, "finite_real")
if not num then
return num
end
if match(val, "[eEpP.]") then -- float
track("number not an integer")
end
if find(val, "+", nil, true) then
track("number with +")
end
-- Track various unusual number inputs to determine if it should be restricted to positive integers by default (possibly including 0).
if not is_positive_integer(num) then
track("number not a positive integer")
if num == 0 then
track("number is 0")
elseif not is_integer(num) then
track("number not an integer")
end
end
return num
end
-- TODO: validate parameter specs separately, as it's making the handler code really messy at the moment.
-- Dispatch table mapping each parameter `type` value to its conversion
-- function. Handlers receive (val, name, param) and either return the
-- converted value or signal failure via convert_val_error(). The table is
-- invoked through its metatable's __call, which validates the raw value's
-- type and rejects unrecognized parameter types.
local type_handlers = setmetatable({
["boolean"] = function(val)
return yesno(val, true)
end,
["family"] = function(val, name, param)
local method, func = param.method
if method == nil or method == "code" then
func, method = get_family_by_code, "code"
elseif method == "name" then
func, method = get_family_by_name, "name"
else
error(format('Internal error: expected `method` for type `family` to be "code", "name" or undefined, but saw %s', dump(method)))
end
return func(val) or convert_val_error(val, name, "family " .. method, "[[WT:LOF]]")
end,
["labels"] = function(val, name, param)
-- FIXME: Should be able to pass in a parse_err function.
return split_labels_on_comma(val)
end,
["form of tags"] = function(val, name, param)
return split_tags_on_ampersand(val)
end,
["language"] = function(val, name, param)
-- Allows etymology-only languages as well as full ones.
return convert_language(val, name, param, true)
end,
["full language"] = convert_language,
["number"] = function(val, name, param)
local allow_hex = param.allow_hex
if allow_hex and allow_hex ~= true then
error(format(
'Internal error: expected `allow_hex` for type `number` to be of type "boolean" or undefined, but saw %s',
dump(allow_hex)
))
end
local num = convert_number(val, allow_hex)
if param.set then
-- Don't pass in "number" here; otherwise no checking will happen.
num = check_set(num, name, param)
end
if num then
return num
end
convert_val_error(val, name, (allow_hex and "decimal or hexadecimal " or "") .. "number")
end,
["range"] = function(val, name, param)
local allow_hex = param.allow_hex
if allow_hex and allow_hex ~= true then
error(format(
'Internal error: expected `allow_hex` for type `range` to be of type "boolean" or undefined, but saw %s',
dump(allow_hex)
))
end
-- Pattern ensures leading minus signs are accounted for.
local m1, m2 = match(val, "^(%s*%S.-)%-(%s*%S.*)")
if m1 then
m1 = convert_number(m1, allow_hex)
if m1 then
m2 = convert_number(m2, allow_hex)
if m2 then
return {m1, m2}
end
end
end
-- Try `val` if it couldn't be split into a range, and return a range of `val` to `val` if possible.
local num = convert_number(val, allow_hex)
if num then
return {num, num}
end
convert_val_error(val, name, (allow_hex and "decimal or hexadecimal " or "") .. "number or a hyphen-separated range of two numbers")
end,
["parameter"] = function(val, name, param)
-- Use the `no_trim` option, as any trimming will have already been done.
return scribunto_parameter_key(val, true)
end,
["qualifier"] = function(val, name, param)
-- Qualifiers are stored as a one-item list.
return {val}
end,
["references"] = function(val, name, param)
return parse_references(val, make_parse_err(val, name))
end,
["genders"] = function(val, name, param)
-- Fast path: a bare spec with no commas or inline modifiers.
if not val:find("[,<]") then
return {{spec = val}}
end
-- NOTE: We don't pass in allow_space_around_comma. Consistent with other comma-separated types, there shouldn't
-- be spaces around the comma.
return parse_gender_and_number_spec {
spec = val,
parse_err = make_parse_err(val, name),
allow_multiple = true,
}
end,
["script"] = function(val, name, param)
local method, func = param.method
if method == nil or method == "code" then
func, method = get_script_by_code, "code"
elseif method == "name" then
func, method = get_script_by_name, "name"
else
error(format('Internal error: expected `method` for type `script` to be "code", "name" or undefined, but saw %s', dump(method)))
end
return func(val) or convert_val_error(val, name, "script " .. method, "[[WT:LOS]]")
end,
["string"] = function(val, name, param) -- To be removed as unnecessary.
track("string")
return val
end,
-- TODO: add support for resolving to unsupported titles.
-- TODO: split this into "page name" (i.e. internal) and "link target" (i.e. external as well), which is more intuitive.
["title"] = function(val, name, param)
local namespace = param.namespace
if namespace == nil then
namespace = 0
else
local valid_type = type(namespace) ~= "number" and 'of type "number" or undefined' or
not namespaces[namespace] and "a valid namespace number" or
nil
if valid_type then
error(format('Internal error: expected `namespace` for type `title` to be %s, but saw %s', valid_type, dump(namespace)))
end
end
-- Decode entities. WARNING: mw.title.makeTitle must be called with `decoded` (as it doesn't decode) and mw.title.new must be called with `val` (as it does decode, so double-decoding needs to be avoided).
local decoded, prefix, title = decode_entities(val), param.prefix
-- If the input is a fragment, treat the title as the current title with the input fragment.
if sub(decoded, 1, 1) == "#" then
-- If prefix is "force", only get the current title if it's in the specified namespace. current_title includes the namespace prefix.
if current_namespace == nil then
local current_title = mw_title.getCurrentTitle()
current_title_text, current_namespace = current_title.prefixedText, current_title.namespace
end
if not (prefix == "force" and namespace ~= current_namespace) then
title = new_title(current_title_text .. val)
end
elseif prefix == "force" then
-- Unconditionally add the namespace prefix (mw.title.makeTitle).
title = make_title(namespace, decoded)
elseif prefix == "full override" then
-- The first input prefix will be used as an override (mw.title.new). This can be a namespace or interwiki prefix.
title = new_title(val, namespace)
elseif prefix == nil or prefix == "namespace override" then
-- Only allow namespace prefixes to override. Interwiki prefixes therefore need to be treated as plaintext (e.g. "el:All topics" with namespace 14 returns "el:Category:All topics", but we want "Category:el:All topics" instead; if the former is really needed, then the input ":el:Category:All topics" will work, as the initial colon overrides the namespace). mw.title.new can take namespace names as well as numbers in the second argument, and will throw an error if the input isn't a valid namespace, so this can be used to determine if a prefix is for a namespace, since mw.title.new will return successfully only if there's either no prefix or the prefix is for a valid namespace (in which case we want the override).
local success
success, title = pcall(new_title, val, match(decoded, "^.-%f[:]") or namespace)
-- Otherwise, get the title with mw.title.makeTitle, which unconditionally adds the namespace prefix, but behaves like mw.title.new if the namespace is 0.
if not success then
title = make_title(namespace, decoded)
end
else
error(format('Internal error: expected `prefix` for type `title` to be "force", "full override", "namespace override" or undefined, but saw %s', dump(prefix)))
end
local allow_external = param.allow_external
if allow_external == true then
return title or convert_val_error(val, name, "Wiktionary or external page title")
elseif not allow_external then
return title and is_internal_title(title) and title or convert_val_error(val, name, "Wiktionary page title")
end
error(format('Internal error: expected `allow_external` for type `title` to be of type "boolean" or undefined, but saw %s', dump(allow_external)))
end,
["Wikimedia language"] = function(val, name, param)
local fallback = param.fallback
if fallback == true then
return get_wm_lang_by_code_with_fallback(val) or convert_val_error(val, name, "Wikimedia language or language code")
elseif not fallback then
return get_wm_lang_by_code(val) or convert_val_error(val, name, "Wikimedia language code")
end
error(format('Internal error: expected `fallback` for type `Wikimedia language` to be of type "boolean" or undefined, but saw %s', dump(fallback)))
end,
}, {
-- TODO: decode HTML entities in all input values. Non-trivial to implement, because we need to avoid any downstream functions decoding the output from this module, which would be double-decoding. Note that "title" has this implemented already, and it needs to have both the raw input and the decoded input to avoid double-decoding by mw.title.new, so any implementation can't be as simple as decoding in __call then passing the result to the handler.
__call = function(self, val, name, param, param_type, default)
local val_type = type(val)
-- TODO: check this for all possible parameter types.
if val_type == param_type then
return val
elseif val_type ~= "string" then
-- Non-string raw values are only legal for boolean/number defaults.
local expected = "string"
if default and (param_type == "boolean" or param_type == "number") then
expected = param_type .. " or " .. expected
end
error(format(
"Internal error: %sargument %s has the type %s; expected a %s.",
default and (default .. " for ") or "", name, dump(val_type), expected
))
end
local func = self[param_type]
if func == nil then
error(format("Internal error: %s is not a recognized parameter type.", dump(param_type)))
end
return func(val, name, param)
end
})
--[==[ func: export.convert_val(val, name, param)
Convert a parameter value according to the associated specs listed in the `params` table passed to
[[Module:parameters]]. `val` is the value to convert for a parameter whose name is `name` (used only in error messages).
`param` is the spec (the value part of the `params` table for the parameter). In place of passing in the parameter name,
`name` can be a function that throws an error, displaying the specified message along with the parameter name and value.
This function processes all the conversion-related fields in `param`, including `type`, `set`, `sublist`, `convert`,
etc. It returns the converted value.
]==]
local function convert_val(val, name, param, default)
local param_type = param.type or "string"
-- If param.type is a function, resolve it to a recognized type.
if is_callable(param_type) then
param_type = param_type(val)
end
local convert, sublist = param.convert, param.sublist
-- `val` might not be a string if it's the default value.
if sublist and type(val) == "string" then
local retlist, set = {}, param.set
if convert then
-- Closure state: `thisindex`/`thisval` track the item currently being
-- processed so the error functions below can report its position.
local thisindex, thisval, insval, parse_err = 0
if is_callable(name) then
-- We assume the passed-in error function in `name` already shows the parameter name and raw value.
function parse_err(msg)
name(format("%s: item #%s=%s",
msg_with_processed(msg, thisval, insval), thisindex, thisval)
)
end
else
function parse_err(msg)
error(format("%s: item #%s=%s of parameter %s=%s",
msg_with_processed(msg, thisval, insval), thisindex, thisval, name, val)
)
end
end
-- Split, check each item against the set, convert its type, then apply
-- the custom `convert` function.
for v in split_sublist(val, name, sublist) do
thisindex, thisval = thisindex + 1, v
if set then
v = check_set(v, name, param, param_type)
end
insert(retlist, convert(type_handlers(v, name, param, param_type, default), parse_err))
end
else
-- No per-item `convert`: just check the set and convert the type.
for v in split_sublist(val, name, sublist) do
if set then
v = check_set(v, name, param, param_type)
end
insert(retlist, type_handlers(v, name, param, param_type, default))
end
end
return retlist
elseif param.set then
val = check_set(val, name, param, param_type)
end
-- Scalar path: single type conversion, then the optional `convert` hook.
local retval = type_handlers(val, name, param, param_type, default)
if convert then
local parse_err
if is_callable(name) then
-- We assume the passed-in error function in `name` already shows the parameter name and raw value.
if retval == val then
-- This is an optimization to avoid creating a closure. The second arm works correctly even
-- when retval == val.
parse_err = name
else
function parse_err(msg)
name(msg_with_processed(msg, val, retval))
end
end
else
function parse_err(msg)
error(format("%s: parameter %s=%s", msg_with_processed(msg, val, retval), name, val))
end
end
retval = convert(retval, parse_err)
end
-- If `sublist` is set but the input wasn't a string, return `retval` as a one-item list.
if sublist then
retval = {retval}
end
return retval
end
export.convert_val = convert_val -- used by [[Module:parameter utilities]]
-- Record an unrecognized argument in `args_unknown` (tracking every
-- occurrence for monitoring) and return the table for chaining.
local function unknown_param(name, val, args_unknown)
	args_unknown[name] = val
	track("unknown parameters")
	return args_unknown
end
-- Ensure that a string-only modifier `tag` is only used on parameters whose
-- type is "string" (the default), "parameter" or a function; otherwise raise
-- an internal error naming the offending parameter.
local function check_string_param_modifier(param_type, name, tag)
	if not param_type then
		return
	end
	local acceptable = param_type == "string" or param_type == "parameter" or is_callable(param_type)
	if not acceptable then
		internal_process_error(
			"%s cannot be set unless %s is set to %s (the default), %s or a function: parameter %s has the type %s.",
			tag, "type", "string", "parameter", name, param_type
		)
	end
end
-- Raise an error about a gap in list parameter values: item `this` is
-- missing but item `nxt` is present. `listname` is the list's display name
-- (or the parameter name is used for numeric lists).
local function hole_error(params, name, listname, this, nxt, extra)
-- `process_error` calls `dump` on values to be inserted into
-- error messages, but with numeric lists this causes "numeric"
-- to look like the name of the list rather than a description,
-- as `dump` adds quote marks. Insert it early to avoid this,
-- but add another %s specifier in all other cases, so that
-- actual list names will be displayed properly.
local offset, specifier, starting_from = 0, "%s", ""
local msg = "Item %%d in the list of %s parameters must be given if item %%d is given, because %sthere shouldn't be any gaps due to missing%s parameters."
local specs = {}
if type(listname) == "string" then
specs[2] = listname
elseif type(name) == "number" then
offset = name - 1 -- To get the original parameter.
specifier = "numeric"
-- If the list doesn't start at parameter 1, avoid implying
-- there can't be any gaps in the numeric parameters if
-- some parameter with a lower key is optional.
for j = name - 1, 1, -1 do
local _param = params[j]
if not (_param and _param.required) then
-- NOTE(review): dump(j + 1) feeds a %d specifier; this relies on
-- dump() returning a plain numeric string for numbers — confirm.
starting_from = format("(starting from parameter %d) ", dump(j + 1))
break
end
end
else
specs[2] = name
end
-- `specs` fills the specifiers left in `msg` after the first format pass:
-- this item's index, the (optional) list name, then the next item's index.
specs[1] = this + offset -- Absolute index for this item.
insert(specs, nxt + offset) -- Absolute index for the next item.
process_error(format(msg, specifier, starting_from, extra or ""), unpack(specs))
end
-- Walk list `val` from 1 to its maxindex and raise a hole error at each
-- missing item, reporting the next item that is actually present.
local function check_disallow_holes(params, val, name, listname, extra)
	local limit = val.maxindex
	for i = 1, limit do
		local item = val[i]
		if item == nil then
			local present = num_keys(val)[i]
			hole_error(params, name, listname, i, present, extra)
		end
	end
end
-- Post-process list parameter `name` in `val` according to its hole-handling
-- options: `disallow_holes` and `disallow_missing` raise errors for gaps,
-- `allow_holes` keeps gaps (and `maxindex`) intact, and otherwise gaps are
-- compressed out so the result is a contiguous sequence.
local function handle_holes(params, val, name)
local param = params[name]
local disallow_holes = param.disallow_holes
-- Iterate up the list, and throw an error if a hole is found.
if disallow_holes then
check_disallow_holes(params, val, name, param.list, " or empty")
end
-- Iterate up the list, and throw an error if a hole is found due to a
-- missing parameter, treating empty parameters as part of the list. This
-- applies beyond maxindex if blank arguments are supplied beyond it, so
-- isn't mutually exclusive with `disallow_holes`.
local empty = val.empty
if param.disallow_missing then
if empty then
-- Remove `empty` from `val`, so it doesn't get returned.
val.empty = nil
for i = 1, max(val.maxindex, empty.maxindex) do
if val[i] == nil and not empty[i] then
-- Merge the keys of given and empty items so the error can name
-- the next item that was actually supplied.
local keys = extend(num_keys(val), num_keys(empty))
sort(keys)
hole_error(params, name, param.list, i, keys[i])
end
end
-- If there's no table of empty parameters, the check is identical to
-- `disallow_holes`, except that the error message only refers to
-- missing parameters, not missing or empty ones. If `disallow_holes` is
-- also set, there's no point checking again.
elseif not disallow_holes then
check_disallow_holes(params, val, name, param.list)
end
end
-- If `allow_holes` is set, there's nothing left to do.
if param.allow_holes then
-- do nothing
-- Otherwise, remove any holes: `pairs` won't work, as it's unsorted, and
-- iterating from 1 to `maxindex` times out with inputs like |100000000000=,
-- so use num_keys to get a list of numerical keys sorted from lowest to
-- highest, then iterate up the list, moving each value in `val` to the
-- lowest unused positive integer key. This also avoids the need to create a
-- new table. If `disallow_holes` is specified, then there can't be any
-- holes in the list, so there's no reason to check again; this doesn't
-- apply to `disallow_missing`, however.
else
if not disallow_holes then
local keys, i = num_keys(val), 0
while true do
i = i + 1
local key = keys[i]
if key == nil then
break
elseif i ~= key then
track("holes compressed")
val[i], val[key] = val[key], nil
end
end
end
-- Some code depends on only numeric params being present when no holes are
-- allowed (e.g. by checking for the presence of arguments using next()), so
-- remove `maxindex`.
val.maxindex = nil
end
end
-- Flatten a list-of-lists result for parameter `name` when its `flatten`
-- option is set. `flatten` is incompatible with `allow_holes`, and only
-- valid together with `sublist` or a list-generating type.
local function maybe_flatten(params, val, name)
	local param = params[name]
	if not param.flatten then
		return val
	end
	if param.allow_holes then
		process_error("For parameter %s, can't set both `allow_holes` and `flatten`", name)
	end
	local ptype = param.type
	local list_generating = param.sublist or ptype == "genders" or ptype == "labels"
		or ptype == "references" or ptype == "qualifier" or ptype == "form of tags"
	if not list_generating then
		process_error("For parameter %s, can only set `flatten` along with `sublist` or a list-generating type", name)
	end
	-- Flatten manually rather than via flatten() in [[Module:table]], which
	-- would try to flatten non-list objects (e.g. title objects) and error.
	-- FIXME: We should do this in-place if possible.
	local flattened = {}
	for i = 1, #val do
		local sublist = val[i]
		for j = 1, #sublist do
			insert(flattened, sublist[j])
		end
	end
	return flattened
end
-- If both `template_default` and `default` are given, `template_default` takes precedence, but only on the template or
-- module page. This means a different default can be specified for the template or module page example. However,
-- `template_default` doesn't apply if any args are set, which helps (somewhat) with examples on documentation pages
-- transcluded into the template page. HACK: We still run into problems on documentation pages transcluded into the
-- template page when pagename= is set. Check this on the assumption that pagename= is fairly standard.
-- Convert the default value for parameter `name`, if one is configured.
-- `template_default` wins over `default`, but only on the template/module's
-- own page when no arguments are set and pagename= isn't used (see the
-- comment above). Returns nil if no default applies.
local function convert_default_val(name, param, pagename_set, any_args_set, add_empty_sublist)
if not pagename_set then
local val = param.template_default
if val ~= nil and not any_args_set and is_own_page() then
return convert_val(val, name, param, "template default")
end
end
local val = param.default
if val ~= nil then
return convert_val(val, name, param, "default")
-- Sublist parameters should return an empty table if not given, but only do
-- this if the parameter isn't also a list (in which case it will already
-- be an empty table).
-- FIXME: do this once all modules that pass in a sublist parameter treat an empty sublist identically to a nil argument; some currently do things based on the fact an argument exists at all.
-- elseif add_empty_sublist and param.sublist then
--return {}
end
end
--[==[
Process arguments with a given list of parameters. Return a table containing the processed arguments. The `args`
parameter specifies the arguments to be processed; they are the arguments you might retrieve from
{frame:getParent().args} (the template arguments) or in some cases {frame.args} (the invocation arguments). The `params`
parameter specifies a list of valid parameters, and consists of a table. If an argument is encountered that is not in
the parameter table, an error is thrown.
The structure of the `params` table is as described above in the intro comment.
'''WARNING:''' The `params` table is destructively modified to save memory. Nonetheless, different keys can share the
same value objects in memory without causing problems.
The `return_unknown` parameter, if set to {true}, prevents the function from triggering an error when it comes across an
argument with a name that it doesn't recognise. Instead, the return value is a pair of values: the first is the
processed arguments as usual, while the second contains all the unrecognised arguments that were left unprocessed. This
allows you to do multi-stage processing, where the entire set of arguments that a template should accept is not known at
once. For example, an inflection-table might do some generic processing on some arguments, but then defer processing of
the remainder to the function that handles a specific inflectional type.
]==]
-- Main argument-processing entry point. Validates the `params` spec table,
-- then walks the raw template/invocation arguments in `args`, normalizing
-- aliases, list items, placeholders and defaults. Returns the processed
-- argument table (plus the table of unrecognised arguments when
-- `return_unknown` is set). See the doc comment above for the full contract.
function export.process(args, params, return_unknown)
	-- Process parameters for specific properties
	-- NOTE: only `args_new` is initialized here; the remaining locals start
	-- out nil and are created lazily to save memory.
	local args_new, args_unknown, any_args_set, required, patterns, list_args, index_list, args_placeholders, placeholders_n = {}
	-- TODO: memoize the processing of each unique `param` value, since it's common for the same value to be used for many parameter names.
	for name, param in pairs(params) do
		validate_name(name, "parameter names")
		if param ~= true then
			local spec_type = type(param)
			if type(param) ~= "table" then
				internal_process_error(
					"spec for parameter %s must be a table of specs or the value true, but found %s.",
					name, spec_type ~= "boolean" and spec_type or param
				)
			end
			-- Populate required table, and make sure aliases aren't set to required.
			if param.required then
				if required == nil then
					required = {}
				end
				required[name] = true
			end
			local listname, alias_of = param.list, param.alias_of
			if alias_of then
				validate_name(alias_of, "the alias_of field of parameter ", name)
				if alias_of == name then
					internal_process_error(
						"parameter %s cannot be an alias of itself.",
						name
					)
				end
				local main_param = params[alias_of]
				-- Check that the alias_of is set to a valid parameter.
				if not (main_param == true or type(main_param) == "table") then
					internal_process_error(
						"parameter %s is an alias of an invalid parameter.",
						name
					)
				end
				validate_alias_options(param, name, main_param, alias_of)
				-- Aliases can't be lists unless the canonical parameter is also a list.
				if listname and (main_param == true or not main_param.list) then
					internal_process_error(
						"list parameter %s is set as an alias of %s, which is not a list parameter.", name, alias_of
					)
				-- Can't be an alias of an alias.
				elseif main_param ~= true then
					local main_alias_of = main_param.alias_of
					if main_alias_of ~= nil then
						internal_process_error(
							"alias_of cannot be set to another alias: parameter %s is set as an alias of %s, which is in turn an alias of %s. Set alias_of for %s to %s.",
							name, alias_of, main_alias_of, name, main_alias_of
						)
					end
				end
			end
			local replaced_by = param.replaced_by
			if replaced_by then -- replaced_by can be `false`, which is OK
				validate_name(replaced_by, "the replaced_by field of parameter ", name)
				if replaced_by == name then
					internal_process_error(
						"parameter %s cannot be replaced by itself.",
						name
					)
				end
				local main_param = params[replaced_by]
				-- Check that the replaced_by is set to a valid parameter.
				if not (main_param == true or type(main_param) == "table") then
					internal_process_error(
						"parameter %s is set to be replaced by an invalid parameter.",
						name
					)
				end
				-- Can't be a replaced-by of a replaced-by.
				if main_param ~= true then
					local main_replaced_by = main_param.replaced_by
					if main_replaced_by ~= nil then
						internal_process_error(
							"replaced_by cannot be set to another replaced-by parameter: parameter %s is set as replaced by %s, which is in turn replaced by %s. Set replaced_by for %s to %s.",
							name, replaced_by, main_replaced_by, name, main_replaced_by
						)
					end
				end
				if param.instead ~= nil then
					internal_process_error("the `instead` tag can only be given when `replaced_by` is set to `false`.")
				end
			elseif replaced_by == false then
				if param.instead ~= nil and type(param.instead) ~= "string" then
					internal_process_error(
						"the `instead` tag must be a string, but saw %s.",
						param.instead
					)
				end
			end
			if replaced_by ~= nil then
				if param.reason ~= nil and type(param.reason) ~= "string" then
					internal_process_error(
						"the `reason` tag must be a string, but saw %s.",
						param.reason
					)
				end
			end
			if listname then
				if not alias_of then
					-- "\1" in a parameter name acts as a placeholder marking
					-- where the item index appears in list item names; it is
					-- stripped when forming the canonical key (and substituted
					-- with the index further below when naming items).
					local key = name
					if type(name) == "string" then
						key = gsub(name, "\1", "")
					end
					local list_arg = {maxindex = 0}
					args_new[key] = list_arg
					if list_args == nil then
						list_args = {}
					end
					list_args[key] = list_arg
				end
				local list_type = type(listname)
				if list_type == "string" then
					-- If the list property is a string, then it represents the name
					-- to be used as the prefix for list items. This is for use with lists
					-- where the first item is a numbered parameter and the
					-- subsequent ones are named, such as 1, pl2, pl3.
					patterns = save_pattern(name, listname, patterns or {})
				elseif listname ~= true then
					internal_process_error(
						"list field for parameter %s must be a boolean, string or undefined, but saw a %s.",
						name, list_type
					)
				elseif type(name) == "number" then
					if index_list ~= nil then
						internal_process_error(
							"only one numeric parameter can be a list, unless the list property is a string."
						)
					end
					-- If the name is a number, then all indexed parameters from
					-- this number onwards go in the list.
					index_list = name
				else
					patterns = save_pattern(name, name, patterns or {})
				end
				if find(name, "\1", nil, true) then
					if args_placeholders then
						placeholders_n = placeholders_n + 1
						args_placeholders[placeholders_n] = name
					else
						args_placeholders, placeholders_n = {name}, 1
					end
				end
			end
		end
	end
	--Process required changes to `params`.
	-- Re-key any placeholder-containing parameter names under their stripped
	-- form, so argument lookup below finds them.
	if args_placeholders then
		for i = 1, placeholders_n do
			local name = args_placeholders[i]
			params[gsub(name, "\1", "")], params[name] = params[name], nil
		end
	end
	-- Process the arguments
	for name, val in pairs(args) do
		any_args_set = true
		validate_name(name, "argument names", nil, true)
		-- Guaranteeing that all values are strings avoids issues with type coercion being inconsistent between functions.
		local val_type = type(val)
		if val_type ~= "string" then
			internal_process_error(
				"argument %s has the type %s; all arguments must be strings.",
				name, val_type
			)
		end
		local orig_name, raw_type, index, canonical = name, type(name)
		if raw_type == "number" then
			if index_list and name >= index_list then
				index = name - index_list + 1
				name = index_list
			end
		elseif patterns then
			-- Does this argument name match a pattern?
			for pattern, pname in next, patterns do
				index = match(name, pattern)
				-- It matches, so store the parameter name and the
				-- numeric index extracted from the argument name.
				if index then
					index = tonumber(index)
					name = pname
					break
				end
			end
		end
		local param = params[name]
		-- If the argument is not in the list of parameters, store it in a separate list.
		if not param then
			args_unknown = unknown_param(name, val, args_unknown or {})
		elseif param == true then
			canonical = orig_name
			val = php_trim(val)
			if val ~= "" then
				-- If the parameter is duplicated, throw an error.
				if args_new[name] ~= nil then
					process_error(
						"Parameter %s has been entered more than once. This is probably because a parameter alias has been used.",
						canonical
					)
				end
				args_new[name] = val
			end
		else
			if param.replaced_by == false then
				process_error(
					("Parameter %%s has been removed and is no longer valid%s.%s"):format(
						param.reason and ", " .. param.reason or "",
						param.instead and " Instead, " .. param.instead .. "." or ""),
					name
				)
			elseif param.replaced_by then
				process_error(
					("Parameter %%s has been replaced by %%s%s."):format(
						param.reason and ", " .. param.reason or ""),
					name, param.replaced_by
				)
			end
			if param.deprecated then
				track("deprecated parameter", name)
			end
			if param.require_index then
				-- Disallow require_index for numeric parameter names, as this doesn't make sense.
				if raw_type == "number" then
					internal_process_error(
						"cannot set require_index for numeric parameter %s.",
						name
					)
				-- If a parameter without the trailing index was found, and
				-- require_index is set on the param, treat it
				-- as if it isn't recognized.
				elseif not index then
					args_unknown = unknown_param(name, val, args_unknown or {})
				end
			end
			-- Check that separate_no_index is not being used with a numeric parameter.
			if param.separate_no_index then
				if raw_type == "number" then
					internal_process_error(
						"cannot set separate_no_index for numeric parameter %s.",
						name
					)
				elseif type(param.alias_of) == "number" then
					internal_process_error(
						"cannot set separate_no_index for parameter %s, as it is an alias of numeric parameter %s.",
						name, param.alias_of
					)
				end
			end
			-- If no index was found, use 1 as the default index.
			-- This makes list parameters like g, g2, g3 put g at index 1.
			-- If `separate_no_index` is set, then use 0 as the default instead.
			if not index and param.list then
				index = param.separate_no_index and 0 or 1
			end
			-- Normalize to the canonical parameter name. If it's a list, but the alias is not, then determine the index.
			local raw_name = param.alias_of
			if raw_name then
				raw_type = type(raw_name)
				if raw_type == "number" then
					name = raw_name
					local main_param = params[raw_name]
					if main_param ~= true and main_param.list then
						if not index then
							index = param.separate_no_index and 0 or 1
						end
						canonical = raw_name + index - 1
					else
						canonical = raw_name
					end
				else
					name = gsub(raw_name, "\1", "")
					local main_param = params[name]
					if not index and main_param ~= true and main_param.list then
						index = param.separate_no_index and 0 or 1
					end
					if not index or index == 0 then
						canonical = name
					elseif name == raw_name then
						canonical = name .. index
					else
						-- Substitute the index into the "\1" placeholder
						-- position to form the user-facing item name.
						canonical = gsub(raw_name, "\1", index)
					end
				end
			else
				canonical = orig_name
			end
			-- Only recognize demo parameters if this is the current template or module's
			-- page, or its documentation page.
			if param.demo and not is_own_page("include_documentation") then
				args_unknown = unknown_param(name, val, args_unknown or {})
			end
			-- Remove leading and trailing whitespace unless no_trim is true.
			if param.no_trim then
				check_string_param_modifier(param.type, name, "no_trim")
			else
				val = php_trim(val)
			end
			-- Empty string is equivalent to nil unless allow_empty is true.
			if param.allow_empty then
				check_string_param_modifier(param.type, name, "allow_empty")
			elseif val == "" then
				-- If `disallow_missing` is set, keep track of empty parameters
				-- via the `empty` field in `arg`, which will be used by the
				-- `disallow_missing` check. This will be deleted before
				-- returning.
				if index and param.disallow_missing then
					local arg = args_new[name]
					local empty = arg.empty
					if empty == nil then
						empty = {maxindex = 0}
						arg.empty = empty
					end
					empty[index] = true
					if index > empty.maxindex then
						empty.maxindex = index
					end
				end
				val = nil
			end
			-- Allow boolean false.
			if val ~= nil then
				-- Convert to proper type if necessary.
				local main_param = params[raw_name]
				if main_param ~= true then
					val = convert_val(val, orig_name, main_param or param)
				end
				-- Mark it as no longer required, as it is present.
				if required then
					required[name] = nil
				end
				-- Store the argument value.
				if index then
					local arg = args_new[name]
					-- If the parameter is duplicated, throw an error.
					if arg[index] ~= nil then
						process_error(
							"Parameter %s has been entered more than once. This is probably because a list parameter has been entered without an index and with index 1 at the same time, or because a parameter alias has been used.",
							canonical
						)
					end
					arg[index] = val
					-- Store the highest index we find.
					local maxindex = arg.maxindex
					if index > maxindex then
						maxindex = index
					end
					-- Index 0 stands in for the "no index" form when
					-- `separate_no_index` is set; move it to `default`.
					if arg[0] ~= nil then
						arg.default, arg[0] = arg[0], nil
						if maxindex < 1 then
							maxindex = 1
						end
					end
					arg.maxindex = maxindex
					if not params[name].list then
						args_new[name] = val
					-- Don't store index 0, as it's a proxy for the default.
					elseif index > 0 then
						arg[index] = val
					end
				else
					-- If the parameter is duplicated, throw an error.
					if args_new[name] ~= nil then
						process_error(
							"Parameter %s has been entered more than once. This is probably because a parameter alias has been used.",
							canonical
						)
					end
					if not raw_name then
						args_new[name] = val
					else
						local main_param = params[raw_name]
						if main_param ~= true and main_param.list then
							local main_arg = args_new[raw_name]
							main_arg[1] = val
							-- Store the highest index we find.
							if main_arg.maxindex < 1 then
								main_arg.maxindex = 1
							end
						else
							args_new[raw_name] = val
						end
					end
				end
			end
		end
	end
	-- Remove holes in any list parameters if needed. This must be handled
	-- straight after the previous loop, as any instances of `empty` need to be
	-- converted to nil.
	if list_args then
		for name, val in next, list_args do
			handle_holes(params, val, name)
		end
	end
	-- If the current page is the template which invoked this Lua instance, then ignore the `require` flag, as it
	-- means we're viewing the template directly. Required parameters sometimes have a `template_default` key set,
	-- which gets used in such cases as a demo.
	-- Note: this won't work on other pages in the Template: namespace (including the /documentation subpage),
	-- or if the #invoke: is on a page in another namespace.
	local pagename_set = args_new.pagename
	-- Handle defaults.
	for name, param in pairs(params) do
		if param ~= true then
			local arg_new = args_new[name]
			if arg_new == nil then
				args_new[name] = convert_default_val(name, param, pagename_set, any_args_set, true)
			elseif param.list and arg_new[1] == nil then
				local default_val = convert_default_val(name, param, pagename_set, any_args_set)
				if default_val ~= nil then
					arg_new[1] = default_val
					if arg_new.maxindex == 0 then
						arg_new.maxindex = 1
					end
				end
			end
		end
	end
	-- Flatten nested lists if called for. This must come after setting the default.
	if list_args then
		for name, val in next, list_args do
			args_new[name] = maybe_flatten(params, val, name)
		end
	end
	-- The required table should now be empty.
	-- If any parameters remain, throw an error, unless we're on the current template or module's page.
	if required and next(required) ~= nil and not is_own_page() then
		params_list_error(required, "required")
	-- Return the arguments table.
	-- If there are any unknown parameters, throw an error, unless return_unknown is set, in which case return args_unknown as a second return value.
	elseif return_unknown then
		return args_new, args_unknown or {}
	elseif args_unknown and next(args_unknown) ~= nil then
		params_list_error(args_unknown, "not used by this template")
	end
	return args_new
end
return export
c23kzesggrlowmn06oii7zu1bbzht4t
মডিউল:affix
828
21839
507791
323203
2026-04-14T06:48:23Z
Redmin
6857
[[en:Module:affix|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507791
Scribunto
text/plain
local export = {}
local debug_force_cat = false -- if set to true, always display categories even on userspace pages
local m_links = require("Module:links")
local m_str_utils = require("Module:string utilities")
local m_table = require("Module:table")
local en_utilities_module = "Module:en-utilities"
local etymology_module = "Module:etymology"
local pron_qualifier_module = "Module:pron qualifier"
local scripts_module = "Module:scripts"
local utilities_module = "Module:utilities"
-- Export this so the category code in [[Module:category tree/etymology]] can access it.
export.affix_lang_data_module_prefix = "Module:affix/lang-data/"
local rsub = m_str_utils.gsub
local usub = m_str_utils.sub
local ulen = m_str_utils.len
local rfind = m_str_utils.find
local rmatch = m_str_utils.match
local pluralize = require(en_utilities_module).pluralize
local u = m_str_utils.char
local ucfirst = m_str_utils.ucfirst
local unpack = unpack or table.unpack -- Lua 5.2 compatibility
-- Build a lookup table that maps every affix form in the array `variants`
-- to the single canonical form `canonical`.
function export.affix_variants(canonical, variants)
	local result = {}
	for _, form in ipairs(variants) do
		result[form] = canonical
	end
	return result
end
-- Construct an id -> target mapping whose `default` field gives the fallback
-- target; entries from the optional `ids` table are copied in on top.
function export.id_mapping(default, ids)
	local result = {default = default}
	if ids then
		for key, target in pairs(ids) do
			result[key] = target
		end
	end
	return result
end
-- For each id in `id_variants` (id -> array of variant forms), map every
-- variant form to an id mapping that resolves both the default and that id
-- to `base`.
function export.id_mapping_with_affix_variants(base, id_variants)
	local result = {}
	for id, forms in pairs(id_variants) do
		for _, form in ipairs(forms) do
			result[form] = export.id_mapping(base, {[id] = base})
		end
	end
	return result
end
-- Shallow-merge any number of tables into a fresh table, later tables
-- overwriting earlier ones on key collisions. Nil/false arguments are skipped.
function export.merge_tables(...)
	local merged = {}
	local count = select("#", ...)
	for i = 1, count do
		local tbl = select(i, ...)
		if tbl then
			for key, value in pairs(tbl) do
				merged[key] = value
			end
		end
	end
	return merged
end
-- Export this so the category code in [[Module:category tree/etymology]] can access it.
-- Set of language codes that have a language-specific affix-mapping data
-- module under the prefix in `export.affix_lang_data_module_prefix`
-- ([[Module:affix/lang-data/]]).
export.langs_with_lang_specific_data = {
	["az"] = true,
	["fi"] = true,
	["fr"] = true,
	["izh"] = true,
	["la"] = true,
	["sah"] = true,
	["tr"] = true,
	["trk-pro"] = true,
}
-- Fallback part-of-speech name; presumably used when no |pos= is supplied —
-- confirm against the later code that reads it (not visible in this chunk).
local default_pos = "term"
--[==[ intro:
===About different types of hyphens ("template", "display" and "lookup"):===
* The "template hyphen" is the per-script hyphen character that is used in template calls to indicate that a term is an
affix. This is always a single Unicode char, but there may be multiple possible hyphens for a given script. Normally
this is just the regular hyphen character "-", but for some non-Latin-script languages (currently only right-to-left
languages), it is different.
* The "display hyphen" is the string (which might be an empty string) that is added onto a term as displayed and linked,
to indicate that a term is an affix. Currently this is always either the same as the template hyphen or an empty
string, but the code below is written generally enough to handle arbitrary display hyphens. Specifically:
*# For East Asian languages, the display hyphen is always blank.
*# For Arabic-script languages, either tatweel (ـ) or ZWNJ (zero-width non-joiner) are allowed as template hyphens,
where ZWNJ is supported primarily for Farsi, because some suffixes have non-joining behavior. The display hyphen
corresponding to tatweel is also tatweel, but the display hyphen corresponding to ZWNJ is blank (tatweel is also
the default display hyphen, for calls to {{tl|prefix}}/{{tl|suffix}}/etc. that don't include an explicit hyphen).
* The "lookup hyphen" is the hyphen that is used when looking up language-specific affix mappings. (These mappings are
discussed in more detail below when discussing link affixes.) It depends only on the script of the affix in question.
Most scripts (including East Asian scripts) use a regular hyphen "-" as the lookup hyphen, but Hebrew and Arabic
have their own lookup hyphens (respectively maqqef and tatweel). Note that for Arabic in particular, there are
three possible template hyphens that are recognized (tatweel, ZWNJ and regular hyphen), but mappings must use tatweel.
===About different types of affixes ("template", "display", "link", "lookup" and "category"):===
* A "template affix" is an affix in its source form as it appears in a template call. Generally, a template affix has an
attached template hyphen (see above) to indicate that it is an affix and indicate what type of affix it is (prefix,
suffix, interfix or circumfix), but some of the older-style templates such as {{tl|suffix}}, {{tl|prefix}},
{{tl|confix}}, etc. have "positional" affixes where the presence of the affix in a certain position (e.g. the second
or third parameter) indicates that it is a certain type of affix, whether or not it has an attached template hyphen.
* A "display affix" is the corresponding affix as it is actually displayed to the user. The display affix may differ
from the template affix for various reasons:
*# The display affix may be specified explicitly using the {{para|alt<var>N</var>}} parameter, the `<alt:...>` inline
modifier or a piped link of the form e.g. `<nowiki>[[-kas|-käs]]</nowiki>` (here indicating that the affix should
display as `-käs` but be linked as `-kas`). Here, the template affix is arguably the entire piped link, while the
display affix is `-käs`.
*# Even in the absence of {{para|alt<var>N</var>}} parameters, `<alt:...>` inline modifiers and piped links, certain
languages have differences between the "template hyphen" specified in the template (which always needs to be
specified somehow or other in templates like {{tl|affix}}, to indicate that the term is an affix and what type of
affix it is) and the display hyphen (see above), with corresponding differences between template and display
affixes.
* A (regular) "link affix" is the affix that is linked to when the affix is shown to the user. The link affix is usually
the same as the display affix, but will differ in one of three circumstances:
*# The display and link affixes are explicitly made different using {{para|alt<var>N</var>}} parameters, `<alt:...>`
inline modifiers or piped links, as described above under "display affix".
*# For certain languages, certain affixes are mapped to canonical form using language-specific mappings. For example,
in Finnish, the adjective-forming suffix {{m|fi|-kas}} appears as {{m|fi|-käs}} after front vowels, but logically
both forms are the same suffix and should be linked and categorized the same. Similarly, in Latin, the negative and
intensive prefixes spelled {{m|la|in-}} (etymologically two distinct prefixes) appear variously as {{m|la|il-}},
{{m|la|im-}} or {{m|la|ir-}} before certain consonants. Mappings are supplied in [[Module:affix/lang-data/LANGCODE]]
to convert Finnish {{m|fi|-käs}} to {{m|fi|-kas}} for linking and categorization purposes. Note that the affixes in
the mappings use "lookup hyphens" to indicate the different types of affixes, which is usually the same as the
template hyphen but differs for Arabic scripts, because there are multiple possible template hyphens recognized but
only one lookup hyphen (tatweel). The form of the affix as used to look up in the mapping tables is called the
"lookup affix"; see below.
* A "stripped link affix" is a link affix that has been passed through the language's `stripDiacritics()` function, which
may strip certain diacritics: e.g. macrons in Latin and Old English (indicating length); acute and grave accents in
Russian and various other Slavic languages (indicating stress); vowel diacritics in most Arabic-script languages; and
also tatweel in some Arabic-script languages (currently, for example, Persian, Arabic and Urdu strip tatweel, but
Ottoman Turkish does not). Stripped link affixes are currently what are used in category names.
* A "lookup affix" is the form of the affix as it is looked up in the language-specific lookup mappings described above
under link affixes. There are actually two lookup stages:
*# First, the affix is looked up in a modified display form (specifically, the same as the display affix but using
lookup hyphens). Note that this lookup does not occur if an explicit display form is given using
{{para|alt<var>N</var>}} or an `<alt:...>` inline modifier, or if the template affix contains a piped or embedded
link.
*# If no entry is found, the affix is then looked up in a modified link form (specifically, the modified display
form passed through the language's `stripDiacritics()` function, which strips out certain diacritics, but with the
lookup hyphen re-added if it was stripped out, as in the case of tatweel in many Arabic-script languages).
The reason for this double lookup procedure is to allow for mappings that are sensitive to the extra diacritics, but
also allow for mappings that are not sensitive in this fashion (e.g. Russian {{m|ru|-ливый}} occurs both stressed and
unstressed, but is the same prefix either way).
* A "category affix" is the affix as it appears in categories such as [[:Category:Finnish terms suffixed with -kas|
Category:Finnish terms suffixed with ''-kas'']]. The category affix is currently always the same as the stripped link
affix. This means that for Arabic-script languages, it may or may not have a tatweel, even if the corresponding display
affix and regular link affix have a tatweel. As mentioned above, stripDiacritics() strips tatweel for Arabic, Persian
and Urdu, but not for Ottoman Turkish. Hence affix categories for Arabic, Persian and Urdu will be missing the
tatweel, but affix categories for Ottoman Turkish will have it. An additional complication is that if the template
affix contains a ZWNJ, the display (and hence the link and category affixes) will have no hyphen attached in any case.
]==]
-----------------------------------------------------------------------------------------
-- Template and display hyphens --
-----------------------------------------------------------------------------------------
--[=[
Per-script template hyphens. The template hyphen is what appears in the {{affix}}/{{prefix}}/{{suffix}}/etc. template
(in the wikicode). See above.
The key below is a script code, after removing a hyphen and anything preceding. Hence, script codes like 'fa-Arab'
and 'ur-Arab' will match 'Arab'.
The value below is a string consisting of one or more hyphen characters. If there is more than one character, the
default hyphen must come last and a non-default function must be specified for the script in display_hyphens[] so
the correct display hyphen will be specified when no template hyphen is given (in {{suffix}}/{{prefix}}/etc.).
Script detection is normally done when linking, but we need to do it earlier. However, under most circumstances we
don't need to do script detection. Specifically, we only need to do script detection for a given language if
(a) the language has multiple scripts; and
(b) at least one of those scripts is listed below or in display_hyphens.
]=]
local ZWNJ = u(0x200C) -- zero-width non-joiner
-- Map from (suffix of) script code to the string of recognized template-hyphen
-- characters; when there are several, the default one comes last (see the
-- comment above).
local template_hyphens = {
	-- This covers all Arabic scripts. See above.
	["Arab"] = "ـ" .. ZWNJ .. "-", -- tatweel + zero-width non-joiner + regular hyphen
	["Hebr"] = "־", -- Hebrew-specific hyphen termed "maqqef"
	["Mong"] = "᠊",
	-- FIXME! What about the following right-to-left scripts?
	-- Adlm (Adlam)
	-- Armi (Imperial Aramaic)
	-- Avst (Avestan)
	-- Cprt (Cypriot)
	-- Khar (Kharoshthi)
	-- Mand (Mandaic/Mandaean)
	-- Mani (Manichaean)
	-- Mend (Mende/Mende Kikakui)
	-- Narb (Old North Arabian)
	-- Nbat (Nabataean/Nabatean)
	-- Nkoo (N'Ko)
	-- Orkh (Orkhon runes)
	-- Phli (Inscriptional Pahlavi)
	-- Phlp (Psalter Pahlavi)
	-- Phlv (Book Pahlavi)
	-- Phnx (Phoenician)
	-- Prti (Inscriptional Parthian)
	-- Rohg (Hanifi Rohingya)
	-- Samr (Samaritan)
	-- Sarb (Old South Arabian)
	-- Sogd (Sogdian)
	-- Sogo (Old Sogdian)
	-- Syrc (Syriac)
	-- Thaa (Thaana)
}
-- Hyphens used when looking up an affix in a lang-specific affix mapping. Defaults to regular hyphen (-). The keys
-- are script codes, after removing a hyphen and anything preceding. Hence, script codes like 'fa-Arab' and 'ur-Arab'
-- will match 'Arab'. The value should be a single character.
local lookup_hyphens = {
	["Hebr"] = "־", -- maqqef
	-- This covers all Arabic scripts. See above.
	["Arab"] = "ـ", -- tatweel
}
-- Default display-hyphen function: pass an explicit template hyphen through
-- unchanged; with no hyphen, fall back to the script's template hyphen(s),
-- or a plain "-" for scripts with no entry in `template_hyphens`.
local function default_display_hyphen(script, hyph)
	if hyph then
		return hyph
	end
	return template_hyphens[script] or "-"
end
-- Display-hyphen function for Arabic scripts: a missing template hyphen
-- yields tatweel (the default), ZWNJ yields the empty string (non-joining
-- behavior, used mainly for Farsi), and anything else passes through.
local function arab_get_display_hyphen(script, hyph)
	if not hyph then
		return "ـ" -- tatweel
	end
	if hyph == ZWNJ then
		return ""
	end
	return hyph
end
-- Display-hyphen function for scripts whose affixes are shown with no
-- attached hyphen at all (see `display_hyphens` below); ignores its arguments.
local function no_display_hyphen(script, hyph)
	return ""
end
-- Per-script function to return the correct display hyphen given the script and template hyphen. The function should
-- also handle the case where the passed-in template hyphen is nil, corresponding to the situation in
-- {{prefix}}/{{suffix}}/etc. where no template hyphen is specified. The key is the script code after removing a hyphen
-- and anything preceding, so 'fa-Arab', 'ur-Arab' etc. will match 'Arab'.
-- Scripts not listed here use `default_display_hyphen`.
local display_hyphens = {
	-- This covers all Arabic scripts. See above.
	["Arab"] = arab_get_display_hyphen,
	["Bopo"] = no_display_hyphen,
	["Hani"] = no_display_hyphen,
	["Hans"] = no_display_hyphen,
	["Hant"] = no_display_hyphen,
	-- The following is a mixture of several scripts. Hopefully the specs here are correct!
	["Jpan"] = no_display_hyphen,
	["Jurc"] = no_display_hyphen,
	["Kitl"] = no_display_hyphen,
	["Kits"] = no_display_hyphen,
	["Laoo"] = no_display_hyphen,
	["Nshu"] = no_display_hyphen,
	["Shui"] = no_display_hyphen,
	["Tang"] = no_display_hyphen,
	["Thaa"] = no_display_hyphen,
	["Thai"] = no_display_hyphen,
	["Tibt"] = no_display_hyphen,
}
-----------------------------------------------------------------------------------------
-- Basic Utility functions --
-----------------------------------------------------------------------------------------
-- Format a wikilink to the glossary appendix entry `entry`, displayed as
-- `text` (defaulting to the entry name itself).
local function glossary_link(entry, text)
	return ("[[Appendix:Glossary#%s|%s]]"):format(entry, text or entry)
end
-- Record one or more debug-tracking pages under the "affix/" prefix.
-- `page` is either a single string or an array of strings; an array is
-- prefixed in place before being handed to [[Module:debug/track]].
local function track(page)
	if type(page) ~= "table" then
		page = "affix/" .. page
	else
		for i, subpage in ipairs(page) do
			page[i] = "affix/" .. subpage
		end
	end
	require("Module:debug/track")(page)
end
-- "If not empty": convert the empty string (and any falsy value) to nil,
-- passing every other value through unchanged.
local function ine(val)
	if val == "" then
		return nil
	end
	return val or nil
end
-----------------------------------------------------------------------------------------
-- Compound types --
-----------------------------------------------------------------------------------------
-- Build an etymology-type spec for a compound whose display text is a
-- glossary link to `typ` (shown as `alttext` if given) followed by
-- " compound", and whose category is "<typ> compounds".
local function make_compound_type(typ, alttext)
	local spec = {}
	spec.text = glossary_link(typ, alttext) .. " compound"
	spec.cat = typ .. " compounds"
	return spec
end
-- Make a compound type entry with a simple rather than glossary link.
-- These should be replaced with a glossary link when the entry in the glossary
-- is created.
local function make_non_glossary_compound_type(typ, alttext)
	local link
	if alttext then
		link = "[[" .. typ .. "|" .. alttext .. "]]"
	else
		link = "[[" .. typ .. "]]"
	end
	return {
		text = link .. " compound",
		cat = typ .. " compounds",
	}
end
-- Build an etymology-type spec whose display text is just the glossary link
-- (without a " compound" suffix) and whose category is the pluralized type
-- name. Note: `pluralize` here binds the function required from
-- [[Module:en-utilities]] at the top of the module; the local `pluralize`
-- defined further down only shadows it for code after that point.
local function make_raw_compound_type(typ, alttext)
	return {
		text = glossary_link(typ, alttext),
		cat = pluralize(typ),
	}
end
-- Build an etymology-type spec for a borrowing: the display text is a
-- glossary link, and `borrowing_type` (rather than `cat`) holds the
-- pluralized type name.
local function make_borrowing_type(typ, alttext)
	return {
		text = glossary_link(typ, alttext),
		borrowing_type = pluralize(typ),
	}
end
-- Recognized values for the compound/etymology |type= parameter. Each value
-- is either a spec table (with `text` plus `cat` or `borrowing_type`) or a
-- string naming the canonical key it is an alias of.
export.etymology_types = {
	["adapted borrowing"] = make_borrowing_type("adapted borrowing"),
	["adap"] = "adapted borrowing",
	["abor"] = "adapted borrowing",
	["alliterative"] = make_non_glossary_compound_type("alliterative"),
	["allit"] = "alliterative",
	["antonymous"] = make_non_glossary_compound_type("antonymous"),
	["ant"] = "antonymous",
	["bahuvrihi"] = make_compound_type("bahuvrihi", "bahuvrīhi"),
	["bahu"] = "bahuvrihi",
	["bv"] = "bahuvrihi",
	["coordinative"] = make_compound_type("coordinative"),
	["coord"] = "coordinative",
	["descriptive"] = make_compound_type("descriptive"),
	["desc"] = "descriptive",
	["determinative"] = make_compound_type("determinative"),
	["det"] = "determinative",
	["dvandva"] = make_compound_type("dvandva"),
	["dva"] = "dvandva",
	["dvigu"] = make_compound_type("dvigu"),
	["dvi"] = "dvigu",
	["endocentric"] = make_compound_type("endocentric"),
	["endo"] = "endocentric",
	["exocentric"] = make_compound_type("exocentric"),
	["exo"] = "exocentric",
	["izafet I"] = make_compound_type("izafet I"),
	["iz1"] = "izafet I",
	["izafet II"] = make_compound_type("izafet II"),
	["iz2"] = "izafet II",
	["izafet III"] = make_compound_type("izafet III"),
	["iz3"] = "izafet III",
	["karmadharaya"] = make_compound_type("karmadharaya", "karmadhāraya"),
	["karma"] = "karmadharaya",
	["kd"] = "karmadharaya",
	["kenning"] = make_raw_compound_type("kenning"),
	["ken"] = "kenning",
	["rhyming"] = make_non_glossary_compound_type("rhyming"),
	["rhy"] = "rhyming",
	["synonymous"] = make_non_glossary_compound_type("synonymous"),
	["syn"] = "synonymous",
	["tatpurusa"] = make_compound_type("tatpurusa", "tatpuruṣa"),
	["tat"] = "tatpurusa",
	["tp"] = "tatpurusa",
}
-- Resolve the |type= value `typ` against `export.etymology_types` and return
-- three values: the initial text sections, the categories to add, and (for
-- borrowings) the borrowing type. `nocap` suppresses capitalizing the type
-- text, `notext` suppresses the text entirely, and `has_parts` appends the
-- type's `oftext` (default " of") plus a space when parts follow.
local function process_etymology_type(typ, nocap, notext, has_parts)
	local text_sections, categories, borrowing_type = {}, {}, nil
	if typ then
		local spec = export.etymology_types[typ]
		-- String entries are aliases of the canonical key.
		if type(spec) == "string" then
			spec = export.etymology_types[spec]
		end
		if not spec then
			error("Internal error: Unrecognized type '" .. typ .. "'")
		end
		local text = spec.text
		if not nocap then
			text = ucfirst(text)
		end
		borrowing_type = spec.borrowing_type
		if not notext then
			text_sections[#text_sections + 1] = text
			if has_parts then
				text_sections[#text_sections + 1] = spec.oftext or " of"
				text_sections[#text_sections + 1] = " "
			end
		end
		if spec.cat then
			categories[#categories + 1] = spec.cat
		end
	end
	return text_sections, categories, borrowing_type
end
-----------------------------------------------------------------------------------------
-- Utility functions --
-----------------------------------------------------------------------------------------
-- Iterate an array up to the greatest integer index found.
-- Unlike ipairs(), this does not stop at the first nil: the returned stateful
-- iterator yields every index from 1 up to the largest numeric key present
-- (as reported by m_table.numKeys), producing nil values at the holes.
local function ipairs_with_gaps(t)
	local indices = m_table.numKeys(t)
	-- math.max(unpack(...)) finds the largest key; 0 means the table has no
	-- numeric keys, so the iterator terminates immediately.
	local max_index = #indices > 0 and math.max(unpack(indices)) or 0
	local i = 0
	return function()
		while i < max_index do
			i = i + 1
			return i, t[i]
		end
	end
end
export.ipairs_with_gaps = ipairs_with_gaps
--[==[
Join the formatted parts in `parts_formatted` into one string, appending any overall {{para|lit}}
gloss (`data.data.lit`), qualifier/label/inflection annotations and formatted categories. Each
entry of `data.categories` is either a plain category string (formatted with the overall sort key)
or a table `{cat=<var>category</var>, sort_key=<var>sort_key</var>, sort_base=<var>sort_base</var>}`
giving per-category sorting. The language name from `data.data.lang` is prepended to every
category. No categories are emitted when `data.data.nocat` is set; `data.data.force_cat` (or the
module-level debug flag) forces them even on userspace pages.
]==]
function export.join_formatted_parts(data)
	local outer = data.data
	local lang = outer.lang
	local force_cat = outer.force_cat or debug_force_cat
	local cattext
	if outer.nocat then
		cattext = ""
	else
		local format_categories = require(utilities_module).format_categories
		local langname = lang:getFullName()
		local cats = data.categories
		for i = 1, #cats do
			local cat = cats[i]
			if type(cat) == "table" then
				cats[i] = format_categories(langname .. " " .. cat.cat, lang, cat.sort_key,
					cat.sort_base, force_cat)
			else
				cats[i] = format_categories(langname .. " " .. cat, lang, outer.sort_key, nil,
					force_cat)
			end
		end
		cattext = concat(cats)
	end
	-- Parts are joined with " + " unless the caller already embedded separators in them.
	local separator
	if not data.separator_already_added then
		separator = " +‎ "
	end
	local result = concat(data.parts_formatted, separator)
	if outer.lit then
		result = result .. ", literally " .. m_links.mark(outer.lit, "gloss")
	end
	local q, qq, l, ll, infl = outer.q, outer.qq, outer.l, outer.ll, outer.infl
	if q and q[1] or qq and qq[1] or l and l[1] or ll and ll[1] or infl and infl[1] then
		result = require(pron_qualifier_module).format_qualifiers {
			lang = lang,
			text = result,
			q = q,
			qq = qq,
			l = l,
			ll = ll,
			infl = infl,
		}
	end
	return result .. cattext
end
-- Pluralize a part-of-speech name for use in category names. "nouns" and names already ending in
-- "verbs" or "ives" (e.g. "adjectives") are taken to be plural already and returned unchanged;
-- otherwise "es" is appended after a final "s" or "x", and "s" in all other cases.
local function pluralize(pos)
	if pos == "nouns" or usub(pos, -5) == "verbs" or usub(pos, -4) == "ives" then
		return pos
	end
	if pos:find("[sx]$") then
		return pos .. "es"
	end
	return pos .. "s"
end
-- Remove wikilinks from `term`, then strip ignorable diacritics according to `lang`.
local function strip_diacritics_no_links(lang, term)
	local delinked = m_links.remove_links(term)
	return lang:stripDiacritics(delinked)
end
--[=[
Canonicalize a raw part as passed into an entry point so it is ready for linking: default the
part's language and script to the overall `lang` and `sc` objects, and split any fragment (text
after "#") off of the term. The fragment is split off so that fragments don't end up in categories
and so that affix mapping still works when a fragment is present. A nil `part` is a no-op.
'''WARNING''': destructively modifies `part`.
]=]
local function canonicalize_part(part, lang, sc)
	if not part then
		return
	end
	-- Remember whether a part-specific language was user-specified. When set, no '*fixed with'
	-- category is inserted and the part is formatted via format_derived() in [[Module:etymology]]
	-- instead of full_link() in [[Module:links]].
	part.part_lang = part.lang
	if not part.lang then
		part.lang = lang
	end
	if not part.sc then
		part.sc = sc
	end
	local term = part.term
	if not term then
		return
	end
	if part.fragment then
		-- An explicit fragment was supplied; just strip any "#..." suffix off the term.
		part.term = m_links.get_fragment(term)
	else
		part.term, part.fragment = m_links.get_fragment(term)
	end
end
--[==[
Format one linked part (after `canonicalize_part()` has been applied to it), for use by
`show_affix()` and other entry points. Normally this is a thin wrapper around `full_link()` in
[[Module:links]]; but when `part.part_lang` is set (a part-specific language was given),
`format_derived()` in [[Module:etymology]] is used instead, displaying the term as belonging to a
language other than the overall one in `data.lang`. `data` is the entire object passed into the
entry point and supplies the information `format_derived()` needs for its categories. If
`include_separator` is given and the part has a `separator` field, it is prepended to the result.
]==]
function export.link_term(part, data, include_separator)
	local formatted
	if part.part_lang then
		formatted = require(etymology_module).format_derived {
			lang = data.lang,
			terms = {part},
			sources = {part.lang},
			sort_key = data.sort_key,
			nocat = data.nocat,
			template_name = "affix",
			qualifiers_labels_on_outside = true,
			borrowing_type = data.borrowing_type,
			force_cat = data.force_cat or debug_force_cat,
		}
	else
		formatted = m_links.full_link(part, "term", nil, "show qualifiers")
	end
	if include_separator and part.separator then
		formatted = part.separator .. formatted
	end
	return formatted
end
-- Strip any language prefix from a script code: "fa-Arab", "ur-Arab" etc. become plain "Arab".
local function canonicalize_script_code(scode)
	-- Everything after the last hyphen (or the whole code if there is no hyphen).
	return scode:match("[^%-]*$")
end
-----------------------------------------------------------------------------------------
-- Affix-handling functions --
-----------------------------------------------------------------------------------------
-- Figure out the appropriate script for the given affix and language (unless the script is explicitly passed in), and
-- return the values of template_hyphens[], display_hyphens[] and lookup_hyphens[] for that script, substituting
-- default values as appropriate ("-" for template/lookup hyphens, `default_display_hyphen` for display). The
-- returned script code is canonicalized (language prefixes such as "fa-" stripped). Four values are returned:
-- DETECTED_SCRIPT, TEMPLATE_HYPHEN, DISPLAY_HYPHEN, LOOKUP_HYPHEN
local function detect_script_and_hyphens(text, lang, sc)
	local scode
	-- 1. If the script is explicitly passed in, use it.
	if sc then
		scode = sc:getCode()
	else
		local possible_script_codes = lang:getScriptCodes()
		-- YUCK! `possible_script_codes` comes from loadData() so #possible_scripts doesn't work (always returns 0).
		local num_possible_script_codes = m_table.length(possible_script_codes)
		if num_possible_script_codes == 0 then
			-- This shouldn't happen; if the language has no script codes,
			-- the list {"None"} should be returned.
			error("Something is majorly wrong! Language " .. lang:getCanonicalName() .. " has no script codes.")
		end
		if num_possible_script_codes == 1 then
			-- 2. If the language has only one possible script, use it.
			scode = possible_script_codes[1]
		else
			-- 3. Check if any of the possible scripts for the language have non-default values for template_hyphens[]
			-- or display_hyphens[]. If so, we need to do script detection on the text. If not, just use "Latn",
			-- which may not be technically correct but produces the right results because Latn has all default
			-- values for template_hyphens[] and display_hyphens[].
			local may_have_nondefault_hyphen = false
			for _, script_code in ipairs(possible_script_codes) do
				script_code = canonicalize_script_code(script_code)
				if template_hyphens[script_code] or display_hyphens[script_code] then
					may_have_nondefault_hyphen = true
					break
				end
			end
			if not may_have_nondefault_hyphen then
				scode = "Latn"
			else
				-- Full (more expensive) script detection on the actual text.
				scode = lang:findBestScript(text):getCode()
			end
		end
	end
	scode = canonicalize_script_code(scode)
	local template_hyphen = template_hyphens[scode] or "-"
	local lookup_hyphen = lookup_hyphens[scode] or "-"
	local display_hyphen = display_hyphens[scode] or default_display_hyphen
	return scode, template_hyphen, display_hyphen, lookup_hyphen
end
--[=[
Given a template affix `term` and an affix type `affix_type`, change the relevant template hyphen(s) in the affix to
the display or lookup hyphen specified in `new_hyphen`, or add them if they are missing. `new_hyphen` can be a string,
specifying a fixed hyphen, or a function of two arguments (the script code `scode` and the discovered template hyphen,
or nil if no relevant template hyphen is present). `thyph_re` is a Lua pattern (which must be enclosed in parens) that
matches the possible template hyphens. Note that not all template hyphens present in the affix are changed, but only
the "relevant" ones (e.g. for a prefix, a relevant template hyphen is one coming at the end of the affix).
]=]
local function reconstruct_term_per_hyphens(term, affix_type, scode, thyph_re, new_hyphen)
	-- Resolve `new_hyphen` for a discovered template hyphen `hyph` (nil if the affix had no
	-- hyphen in the relevant position).
	local function get_hyphen(hyph)
		if type(new_hyphen) == "string" then
			return new_hyphen
		end
		return new_hyphen(scode, hyph)
	end
	if affix_type == "non-affix" then
		return term
	elseif affix_type == "circumfix" then
		-- A circumfix is written "before- -after": hyphen, space, hyphen in the middle.
		local before, before_hyphen, after_hyphen, after = rmatch(term, "^(.*)" .. thyph_re .. " " .. thyph_re
			.. "(.*)$")
		if not before or ulen(term) <= 3 then
			-- Unlike with other types of affixes, don't try to add hyphens in the middle of the term to convert it to
			-- a circumfix. Also, if the term is just hyphen + space + hyphen, return it.
			return term
		end
		return before .. get_hyphen(before_hyphen) .. " " .. get_hyphen(after_hyphen) .. after
	elseif affix_type == "infix" or affix_type == "interfix" then
		local before_hyphen, middle, after_hyphen = rmatch(term, "^" .. thyph_re .. "(.*)" .. thyph_re .. "$")
		if before_hyphen and ulen(term) <= 1 then
			-- If the term is just a hyphen, return it.
			return term
		end
		-- If no hyphens were found, `middle` is nil; wrap the whole term in new hyphens.
		return get_hyphen(before_hyphen) .. (middle or term) .. get_hyphen(after_hyphen)
	elseif affix_type == "prefix" then
		local middle, after_hyphen = rmatch(term, "^(.*)" .. thyph_re .. "$")
		if middle and ulen(term) <= 1 then
			-- If the term is just a hyphen, return it.
			return term
		end
		return (middle or term) .. get_hyphen(after_hyphen)
	elseif affix_type == "suffix" then
		local before_hyphen, middle = rmatch(term, "^" .. thyph_re .. "(.*)$")
		if before_hyphen and ulen(term) <= 1 then
			-- If the term is just a hyphen, return it.
			return term
		end
		return get_hyphen(before_hyphen) .. (middle or term)
	else
		error(("Internal error: Unrecognized affix type '%s'"):format(affix_type))
	end
end
--[=[
Look up a mapping from a given affix variant to the canonical form used in categories and links. The lookup tables are
language-specific according to `lang`, and may be ID-specific according to `affix_id`. The affixes as they appear in the
lookup tables (both the variant and the canonical form) are in "lookup affix" format (approximately speaking, they use a
regular hyphen for most scripts, but a tatweel for Arabic-script entries and a maqqef for Hebrew-script entries), but
the passed-in `affix` param is in "template affix" format (which differs from the lookup affix for Arabic-script
entries, because more types of hyphens are allowed in template affixes; see the comments at the top of the file). The
remaining parameters to this function are used to convert from template affixes to lookup affixes; see the
reconstruct_term_per_hyphens() function above.
If the affix contains brackets, no lookup is done. Otherwise, a two-stage process is used, first looking up the affix
directly and then stripping diacritics and looking it up again. The reason for this is documented above in the comments
at the top of the file (specifically, the comments describing lookup affixes).
The value of a mapping can either be a string (do the mapping regardless of affix ID) or a table indexed by affix ID
(where the special value `false` indicates no affix ID). The values of entries in this table can also be strings, or
tables with keys `affix` and `id` (again, use `false` to indicate no ID). This allows an affix mapping to map from one
ID to another (for example, this is used in English to map the [[an-]] prefix with no ID to the [[a-]] prefix with the
ID 'not').
Returns the mapping if one is found, otherwise nil.
]=]
local function lookup_affix_mapping(affix, affix_type, lang, scode, thyph_re, lookup_hyph, affix_id)
	local function do_lookup(affix)
		-- Ensure that the affix uses lookup hyphens regardless of whether it used a different type of hyphens before
		-- or no hyphens.
		local lookup_affix = reconstruct_term_per_hyphens(affix, affix_type, scode, thyph_re, lookup_hyph)
		-- Look up the lookup-format affix in the data module for one language code, if that
		-- language has lang-specific affix data. Returns nil if no mapping is found.
		local function do_lookup_for_langcode(langcode)
			if export.langs_with_lang_specific_data[langcode] then
				local langdata = mw.loadData(export.affix_lang_data_module_prefix .. langcode)
				if langdata.affix_mappings then
					local mapping = langdata.affix_mappings[lookup_affix]
					if mapping then
						if type(mapping) == "table" then
							-- ID-specific mapping table: try the given ID, then the `default`
							-- entry, then the no-ID entry keyed by `false`.
							mapping = mapping[affix_id] or mapping.default or mapping[affix_id or false]
							if mapping then
								return mapping
							end
						else
							return mapping
						end
					end
				end
			end
		end
		-- If `lang` is an etymology-only language, look for a mapping both for it and its full parent.
		local langcode = lang:getCode()
		local mapping = do_lookup_for_langcode(langcode)
		if mapping then
			return mapping
		end
		local full_langcode = lang:getFullCode()
		if full_langcode ~= langcode then
			mapping = do_lookup_for_langcode(full_langcode)
			if mapping then
				return mapping
			end
		end
		return nil
	end
	-- Affixes containing bracketed links are never mapped.
	if affix:find("%[%[") then
		return nil
	end
	return do_lookup(affix) or do_lookup(lang:stripDiacritics(affix)) or nil
end
--[==[
For a given template term in a given language (see the definition of "template affix" near the top of the file),
possibly in an explicitly specified script `sc` (but usually nil), return the term's affix type ({"prefix"},
{"interfix"}, {"suffix"}, {"circumfix"} or {"non-affix"}) along with the corresponding link and display affixes
(see definitions near the top of the file); also the corresponding lookup affix (if `return_lookup_affix` is specified).
The term passed in should already have any fragment (after the # sign) parsed off of it. Four values are returned:
`affix_type`, `link_term`, `display_term` and `lookup_term`. The affix type can be passed in instead of autodetected; in
this case, the template term need not have any attached hyphens, and the appropriate hyphens will be added in the
appropriate places. If `do_affix_mapping` is specified, look up the affix in the lang-specific affix mappings, as
described in the comment at the top of the file; otherwise, the link and display terms will always be the same. (They
will be the same in any case if the template term has a bracketed link in it or is not an affix.) If
`return_lookup_affix` is given, the fourth return value contains the term with appropriate lookup hyphens in the
appropriate places; otherwise, it is the same as the display term. (This functionality is used in
[[Module:category tree/affixes and compounds]] to convert link affixes into lookup affixes so that they can be looked up
in the affix mapping tables.)
]==]
local function parse_term_for_affixes(term, lang, sc, affix_type, do_affix_mapping, return_lookup_affix, affix_id)
	if not term then
		-- No term at all: non-affix with nil link/display/lookup terms.
		return "non-affix", nil, nil, nil
	end
	if term == "^" then
		-- Indicates a null term to emulate the behavior of {{suffix|foo||bar}}.
		term = ""
		return "non-affix", term, term, term
	end
	if term:find("^%^") then
		-- HACK! ^ at the beginning of Korean languages has a special meaning, triggering capitalization of the
		-- transliteration. Don't interpret it as "force non-affix" for those languages.
		local langcode = lang:getCode()
		if langcode ~= "ko" and langcode ~= "okm" and langcode ~= "jje" then
			-- Formerly we allowed ^ to force non-affix type; this is now handled using an inline modifier
			-- <naf>, <root>, etc. Throw an error for the moment when the old way is encountered.
			error("Use of ^ to force non-affix status is no longer supported; use an inline modifier <naf> or <root> " ..
				"after the component")
		end
	end
	-- Remove an asterisk if the morpheme is reconstructed and add it back at the end.
	local reconstructed = ""
	if term:find("^%*") then
		reconstructed = "*"
		term = term:gsub("^%*", "")
	end
	local scode, thyph, dhyph, lhyph = detect_script_and_hyphens(term, lang, sc)
	-- Turn the set of template hyphens into a single-character capturing pattern class.
	thyph = "([" .. thyph .. "])"
	if not affix_type then
		-- Autodetect the affix type from where hyphens are attached.
		if rfind(term, thyph .. " " .. thyph) then
			affix_type = "circumfix"
		else
			local has_beginning_hyphen = rfind(term, "^" .. thyph)
			local has_ending_hyphen = rfind(term, thyph .. "$")
			if has_beginning_hyphen and has_ending_hyphen then
				affix_type = "interfix"
			elseif has_ending_hyphen then
				affix_type = "prefix"
			elseif has_beginning_hyphen then
				affix_type = "suffix"
			else
				affix_type = "non-affix"
			end
		end
	end
	local link_term, display_term, lookup_term
	if affix_type == "non-affix" then
		link_term = term
		display_term = term
		lookup_term = term
	else
		display_term = reconstruct_term_per_hyphens(term, affix_type, scode, thyph, dhyph)
		if do_affix_mapping then
			link_term = lookup_affix_mapping(term, affix_type, lang, scode, thyph, lhyph, affix_id)
			-- The return value of lookup_affix_mapping() may be an affix mapping with lookup hyphens if a mapping
			-- was found, otherwise nil if a mapping was not found. We need to convert to display hyphens in
			-- either case, but in the latter case we can reuse the display term, which has already been converted.
			if link_term then
				link_term = reconstruct_term_per_hyphens(link_term, affix_type, scode, thyph, dhyph)
			else
				link_term = display_term
			end
		else
			link_term = display_term
		end
		if return_lookup_affix then
			lookup_term = reconstruct_term_per_hyphens(term, affix_type, scode, thyph, lhyph)
		else
			lookup_term = display_term
		end
	end
	-- Re-attach the reconstruction asterisk, if any.
	link_term = reconstructed .. link_term
	display_term = reconstructed .. display_term
	lookup_term = reconstructed .. lookup_term
	return affix_type, link_term, display_term, lookup_term
end
--[==[
Add a hyphen to a term in the appropriate place, based on the specified affix type, stripping off any existing hyphens
in that place. For example, if `affix_type` == {"prefix"}, we'll add a hyphen onto the end if it's not already there (or
is of the wrong type). Three values are returned: the link term, display term and lookup term. This function is a thin
wrapper around `parse_term_for_affixes`; see the comments above that function for more information. Note that this
function is exposed externally because it is called by [[Module:category tree/affixes and compounds]]; see the comment
in `parse_term_for_affixes` for more information.
Throws an internal error if `affix_type` is not one of the six recognized affix types.
]==]
function export.make_affix(term, lang, sc, affix_type, do_affix_mapping, return_lookup_affix, affix_id)
	-- Validate the affix type up front so bad internal callers fail fast with a clear message.
	-- Use ("%s"):format(tostring(...)) so that non-string values (nil, tables, ...) don't break
	-- the error report itself; this also matches the internal-error style used elsewhere in
	-- this module (cf. reconstruct_term_per_hyphens()).
	local valid_affix_types = {
		["prefix"] = true, ["suffix"] = true, ["circumfix"] = true,
		["infix"] = true, ["interfix"] = true, ["non-affix"] = true,
	}
	if not valid_affix_types[affix_type] then
		error(("Internal error: Invalid affix type '%s'"):format(tostring(affix_type)))
	end
	local _, link_term, display_term, lookup_term = parse_term_for_affixes(term, lang, sc, affix_type,
		do_affix_mapping, return_lookup_affix, affix_id)
	return link_term, display_term, lookup_term
end
-----------------------------------------------------------------------------------------
-- Main entry points --
-----------------------------------------------------------------------------------------
--[==[
Core categorization logic for affixes. This is shared between show_affix(), show_compound_like() and
get_affix_categories_only(). Returns three values: the introductory text sections derived from the
etymology type, the categories array, and the borrowing type (also stored into `data.borrowing_type`).
Raises an error when no affix or compound is present, unless `data.allow_no_affixes_or_compounds` is set.
'''WARNING''': destructively modifies `data` and the structures within `data.parts`.
]==]
local function generate_affix_categories(data)
	data.pos = data.pos or default_pos
	data.pos = pluralize(data.pos)
	local text_sections, categories, borrowing_type =
		process_etymology_type(data.type, data.surface_analysis or data.nocap, data.notext, #data.parts > 0)
	data.borrowing_type = borrowing_type
	-- Process each part
	local whole_words = 0
	local is_affix_or_compound = false
	-- Canonicalize and generate links for all the parts first; then do categorization in a separate step, because when
	-- processing the first part for categorization, we may access the second part and need it already canonicalized.
	for i, part in ipairs_with_gaps(data.parts) do
		part = part or {}
		data.parts[i] = part
		canonicalize_part(part, data.lang, data.sc)
		-- Determine affix type and get link and display terms (see text at top of file). Store them in the part
		-- (in fields that won't clash with fields used by full_link() in [[Module:links]] or link_term()), so they
		-- can be used in the loop below when categorizing.
		part.affix_type, part.affix_link_term, part.affix_display_term = parse_term_for_affixes(part.term,
			part.lang, part.sc, part.type, not part.alt, nil, part.id)
		-- If link_term is an empty string, either a bare ^ was specified or an empty term was used along with inline
		-- modifiers. The intention in either case is not to link the term.
		part.term = ine(part.affix_link_term)
		-- If part.alt would be the same as part.term, make it nil, so that it isn't erroneously tracked as being
		-- redundant alt text.
		part.alt = part.alt or (part.affix_display_term ~= part.affix_link_term and part.affix_display_term) or nil
	end
	if not data.noaffixcat then
		-- Now do categorization.
		for i, part in ipairs_with_gaps(data.parts) do
			local affix_type = part.affix_type
			if affix_type ~= "non-affix" then
				is_affix_or_compound = true
				-- Make a sort key. For the first part, use the second part as the sort key; the intention is that if the
				-- term has a prefix, sorting by the prefix won't be very useful so we sort by what follows, which is
				-- presumably the root.
				local part_sort_base = nil
				local part_sort = part.sort or data.sort_key
				if i == 1 and data.parts[2] and data.parts[2].term then
					local part2 = data.parts[2]
					-- If the second-part link term is empty, the user requested an unlinked term; avoid a wikitext error
					-- by using the alt value if available.
					part_sort_base = ine(part2.affix_link_term) or ine(part2.alt)
					if part_sort_base then
						part_sort_base = strip_diacritics_no_links(part2.lang, part_sort_base)
					end
				end
				if part.pos and rfind(part.pos, "patronym") then
					table.insert(categories, {cat = "patronymics", sort_key = part_sort, sort_base = part_sort_base})
				end
				if data.pos ~= "terms" and part.pos and rfind(part.pos, "diminutive") then
					table.insert(categories, {cat = "diminutive " .. data.pos, sort_key = part_sort,
						sort_base = part_sort_base})
				end
				-- Don't add a '*fixed with' category if the link term is empty or is in a different language.
				if ine(part.affix_link_term) and not part.part_lang then
					table.insert(categories, {cat = data.pos .. " " .. affix_type .. "ed with " ..
						strip_diacritics_no_links(part.lang, part.affix_link_term) ..
						(part.id and " (" .. part.id .. ")" or ""),
						sort_key = part_sort, sort_base = part_sort_base})
				end
			else
				whole_words = whole_words + 1
				if whole_words == 2 then
					is_affix_or_compound = true
					table.insert(categories, "compound " .. data.pos)
				end
			end
		end
		-- Make sure there was either an affix or a compound (two or more non-affix terms).
		if not is_affix_or_compound and not data.allow_no_affixes_or_compounds then
			error("The parameters did not include any affixes, and the term is not a compound. Please provide at least one affix.")
		end
	end
	return text_sections, categories, borrowing_type
end
--[==[
Implementation of {{tl|affix}} and {{tl|surface analysis}}. `data` describes the affixes to show:
* `.lang` ('''required'''): overall language object; `.sc`: overall script object (usually nil).
* `.parts` ('''required'''): list of part objects in the format accepted by `full_link()`, except
  that a part's `.lang` should be given only when its language differs from the overall one (in
  which case the language name is shown with the term and a "derived from" category is added).
* `.pos`: overall part of speech for categories (defaults to {"terms"}); `.sort_key`: overall sort
  key (normally omitted except e.g. in Japanese); `.type`: compound type, displayed before the
  parts (capitalized unless `.nocap`); `.notext`: suppress the text shown before the parts;
  `.nocat`: disable all categorization; `.noaffixcat`: disable only affix/compound categorization
  (relevant e.g. for blends); `.lit`: overall literal definition; `.force_cat`: always display
  categories, even on userspace pages; `.surface_analysis`: implement {{tl|surface analysis}},
  prepending "By surface analysis, " to the output.
'''WARNING''': This destructively modifies both `data` and the individual structures within `.parts`.
]==]
function export.show_affix(data)
	local text_sections, categories = generate_affix_categories(data)
	-- Format each part as a link.
	local parts_formatted = {}
	for _, part in ipairs_with_gaps(data.parts) do
		insert(parts_formatted, export.link_term(part, data, "include_separator"))
	end
	if data.surface_analysis then
		local lead_text = "by " .. glossary_link("surface analysis") .. ", "
		if not data.nocap then
			lead_text = ucfirst(lead_text)
		end
		insert(text_sections, 1, lead_text)
	end
	insert(text_sections, export.join_formatted_parts {
		data = data,
		parts_formatted = parts_formatted,
		categories = categories,
		separator_already_added = true,
	})
	return concat(text_sections)
end
--[==[
Get only the categories that show_affix() would generate, without any text output or formatting;
used by Module:etymon for affix categorization. Returns an array where each entry is either a
plain category name (string) or a table with keys `cat`, `sort_key` and `sort_base` for more
complex categorization. `data` has the same structure as for show_affix(): `.lang` (required),
`.parts` (required, array of part objects with `.term`, `.lang`, `.id`, etc.), `.pos` (defaults to
"terms") and `.sort_key`.
'''WARNING''': This destructively modifies both `data` and the individual structures within `.parts`.
]==]
function export.get_affix_categories_only(data)
	-- Only the categories (second return value) are of interest here.
	local _, categories = generate_affix_categories(data)
	return categories
end
-- Implementation of {{tl|surface analysis}}: identical to {{tl|affix}} except that introductory
-- "By surface analysis, " text is shown and the parts are not required to contain any affix or
-- compound. '''WARNING''': destructively modifies `data` and the structures within `.parts`.
function export.show_surface_analysis(data)
	data.allow_no_affixes_or_compounds = true
	data.surface_analysis = true
	return export.show_affix(data)
end
--[==[
Implementation of {{tl|compound}}. A "compound <pos>" category is always added; hyphen-attached
parts are generally displayed as written rather than treated as affixes (except for interfixes and
parts whose type was explicitly given).
'''WARNING''': This destructively modifies both `data` and the individual structures within `.parts`.
]==]
function export.show_compound(data)
	data.pos = data.pos or default_pos
	data.pos = pluralize(data.pos)
	local text_sections, categories, borrowing_type =
		process_etymology_type(data.type, data.nocap, data.notext, #data.parts > 0)
	data.borrowing_type = borrowing_type
	local parts_formatted = {}
	table.insert(categories, "compound " .. data.pos)
	-- Make links out of all the parts
	local whole_words = 0
	for i, part in ipairs(data.parts) do
		canonicalize_part(part, data.lang, data.sc)
		-- Determine affix type and get link and display terms (see text at top of file).
		local affix_type, link_term, display_term = parse_term_for_affixes(part.term, part.lang, part.sc,
			part.type, not part.alt, nil, part.id)
		-- If the term is an interfix or the type was explicitly given, recognize it as such (which means e.g. that we
		-- will display the term without hyphens for East Asian languages). Otherwise, ignore the fact that it looks
		-- like an affix and display as specified in the template (but pay attention to the detected affix type for
		-- certain tracking purposes).
		if affix_type == "interfix" or (part.type and part.type ~= "non-affix") then
			-- If link_term is an empty string, either a bare ^ was specified or an empty term was used along with
			-- inline modifiers. The intention in either case is not to link the term. Don't add a '*fixed with'
			-- category in this case, or if the term is in a different language.
			-- If part.alt would be the same as part.term, make it nil, so that it isn't erroneously tracked as being
			-- redundant alt text.
			if link_term and link_term ~= "" and not part.part_lang then
				table.insert(categories, {cat = data.pos .. " " .. affix_type .. "ed with " ..
					strip_diacritics_no_links(part.lang, link_term), sort_key = part.sort or data.sort_key})
			end
			part.term = link_term ~= "" and link_term or nil
			part.alt = part.alt or (display_term ~= link_term and display_term) or nil
		else
			if affix_type ~= "non-affix" then
				local langcode = data.lang:getCode()
				-- If `data.lang` is an etymology-only language, track both using its code and its full parent's code.
				track { affix_type, affix_type .. "/lang/" .. langcode }
				local full_langcode = data.lang:getFullCode()
				if langcode ~= full_langcode then
					track(affix_type .. "/lang/" .. full_langcode)
				end
			else
				whole_words = whole_words + 1
			end
		end
		table.insert(parts_formatted, export.link_term(part, data, "include_separator"))
	end
	-- Track suspicious part patterns: exactly one whole word among affixes, or none at all
	-- (which looks like the term should have used {{confix}} instead).
	if whole_words == 1 then
		track("one whole word")
	elseif whole_words == 0 then
		track("looks like confix")
	end
	table.insert(text_sections, export.join_formatted_parts { data = data, parts_formatted = parts_formatted,
		categories = categories, separator_already_added = true })
	return table.concat(text_sections)
end
--[==[
Implementation of {{tl|blend}}, {{tl|univerbation}} and similar "compound-like" templates. In
addition to the fields handled by generate_affix_categories(), `data.cat` adds an extra category,
`data.text` supplies leading text and `data.oftext` supplies connector text shown before the parts
(only when there are parts).
'''WARNING''': This destructively modifies both `data` and the individual structures within `.parts`.
]==]
function export.show_compound_like(data)
	data.allow_no_affixes_or_compounds = true
	local text_sections, categories = generate_affix_categories(data)
	if data.cat then
		insert(categories, data.cat)
	end
	-- Format each part as a link.
	local parts_formatted = {}
	for _, part in ipairs_with_gaps(data.parts) do
		insert(parts_formatted, export.link_term(part, data, "include_separator"))
	end
	-- Final order of leading text: `data.text`, then the "of" connector, then the parts.
	if data.oftext and #data.parts > 0 then
		insert(text_sections, 1, " " .. data.oftext .. " ")
	end
	if data.text then
		insert(text_sections, 1, data.text)
	end
	insert(text_sections, export.join_formatted_parts {
		data = data,
		parts_formatted = parts_formatted,
		categories = categories,
		separator_already_added = true,
	})
	return concat(text_sections)
end
--[==[
Make `part` (a structure holding information on an affix part) into an affix of type `affix_type`, and apply any
relevant affix mappings. For example, if the desired affix type is "suffix", this will (in general) add a hyphen onto
the beginning of the term, alt, tr and ts components of the part if not already present. The hyphen that's added is the
"display hyphen" (see above) and may be script-specific. (In the case of East Asian scripts, the display hyphen is an
empty string whereas the template hyphen is the regular hyphen, meaning that any regular hyphen at the beginning of the
part will be effectively removed.) `lang` and `sc` hold overall language and script objects.
Note that this also applies any language-specific affix mappings, so that e.g. if the language is Finnish and the user
specified [[-käs]] in the affix and didn't specify an `.alt` value, `part.term` will contain [[-kas]] and `part.alt` will
contain [[-käs]].
This function is used by the "legacy" templates ({{tl|prefix}}, {{tl|suffix}}, {{tl|confix}}, etc.) where the nature of
the affix is specified by the template itself rather than auto-determined from the affix, as is the case with
{{tl|affix}}.
'''WARNING''': This destructively modifies `part`.
]==]
local function make_part_into_affix(part, lang, sc, affix_type)
	canonicalize_part(part, lang, sc)
	-- Apply affix mapping only when no explicit alt display text was given (`not part.alt`).
	local link_term, display_term = export.make_affix(part.term, part.lang, part.sc, affix_type, not part.alt, nil, part.id)
	part.term = link_term
	-- When we don't specify `do_affix_mapping` to make_affix(), link and display terms (first and second retvals of
	-- make_affix()) are the same.
	-- If part.alt would be the same as part.term, make it nil, so that it isn't erroneously tracked as being
	-- redundant alt text.
	part.alt = part.alt and export.make_affix(part.alt, part.lang, part.sc, affix_type) or (display_term ~= link_term and display_term) or nil
	-- Transliteration and transcription are hyphenated as Latin-script text.
	local Latn = require(scripts_module).getByCode("Latn")
	part.tr = export.make_affix(part.tr, part.lang, Latn, affix_type)
	part.ts = export.make_affix(part.ts, part.lang, Latn, affix_type)
end
-- Add tracking categories when `part` (used by a legacy template such as {{tl|prefix}} or
-- {{tl|suffix}}) does not look like the affix type the template expects — e.g. a suffix argument
-- with no leading hyphen. `expected_affix_type` is nil for the base (non-affix) part. Parts with
-- an explicitly given type are skipped. If `part.lang` is an etymology-only language, tracking is
-- done both under its code and its full parent's code.
local function track_wrong_affix_type(template, part, expected_affix_type)
	if not part or part.type then
		return
	end
	local affix_type = parse_term_for_affixes(part.term, part.lang, part.sc)
	if affix_type == expected_affix_type then
		return
	end
	local part_name = expected_affix_type or "base"
	local langcode = part.lang:getCode()
	local full_langcode = part.lang:getFullCode()
	local track_fn = require(debug_track_module)
	local page_prefix = template .. "/" .. part_name .. "/" .. (affix_type or "none")
	track_fn {
		template,
		template .. "/" .. part_name,
		page_prefix,
		page_prefix .. "/lang/" .. langcode,
	}
	if full_langcode ~= langcode then
		track_fn(page_prefix .. "/lang/" .. full_langcode)
	end
end
-- Append a "<pos> <affix_type>ed with <term>" category for `part` to
-- `categories`. Skipped when the part has no term or is in a different
-- language from the overall term (`part.part_lang` set). When a sort key or
-- sort base is supplied, the entry is a structured table instead of a string.
local function insert_affix_category(categories, pos, affix_type, part, sort_key, sort_base)
	if not part.term or part.part_lang then
		return
	end
	local id_suffix = ""
	if part.id then
		id_suffix = " (" .. part.id .. ")"
	end
	local cat = pos .. " " .. affix_type .. "ed with " .. strip_diacritics_no_links(part.lang, part.term) .. id_suffix
	if sort_key or sort_base then
		categories[#categories + 1] = {cat = cat, sort_key = sort_key, sort_base = sort_base}
	else
		categories[#categories + 1] = cat
	end
end
--[==[
Implementation of {{tl|circumfix}}.
'''WARNING''': This destructively modifies both `data` and `.prefix`, `.base` and `.suffix`.
]==]
function export.show_circumfix(data)
	data.pos = data.pos or default_pos
	data.pos = pluralize(data.pos)
	canonicalize_part(data.base, data.lang, data.sc)
	-- Hyphenate the affixes and apply any affix mappings.
	make_part_into_affix(data.prefix, data.lang, data.sc, "prefix")
	make_part_into_affix(data.suffix, data.lang, data.sc, "suffix")
	track_wrong_affix_type("circumfix", data.prefix, "prefix")
	track_wrong_affix_type("circumfix", data.base, nil)
	track_wrong_affix_type("circumfix", data.suffix, "suffix")
	-- Create the combined circumfix term ("PREFIX- -SUFFIX"): each half keeps
	-- its own display form but links to the full circumfix entry.
	local circumfix = nil
	if data.prefix.term and data.suffix.term then
		circumfix = data.prefix.term .. " " .. data.suffix.term
		data.prefix.alt = data.prefix.alt or data.prefix.term
		data.suffix.alt = data.suffix.alt or data.suffix.term
		data.prefix.term = circumfix
		data.suffix.term = circumfix
	end
	-- Make links out of all the parts.
	local parts_formatted = {}
	local categories = {}
	local sort_base
	if data.base.term then
		sort_base = strip_diacritics_no_links(data.base.lang, data.base.term)
	end
	table.insert(parts_formatted, export.link_term(data.prefix, data))
	table.insert(parts_formatted, export.link_term(data.base, data))
	table.insert(parts_formatted, export.link_term(data.suffix, data))
	-- Insert the categories, but don't add a '*fixed with' category if the
	-- circumfix couldn't be formed (missing prefix or suffix term, which would
	-- otherwise trigger a nil-concatenation error here) or if the link term is
	-- in a different language.
	if circumfix and not data.prefix.part_lang then
		table.insert(categories, {cat=data.pos .. " circumfixed with " .. strip_diacritics_no_links(data.prefix.lang,
			circumfix), sort_key=data.sort_key, sort_base=sort_base})
	end
	return export.join_formatted_parts { data = data, parts_formatted = parts_formatted, categories = categories }
end
--[==[
Implementation of {{tl|confix}}.
'''WARNING''': This destructively modifies both `data` and `.prefix`, `.base` and `.suffix`.
]==]
function export.show_confix(data)
	data.pos = pluralize(data.pos or default_pos)
	canonicalize_part(data.base, data.lang, data.sc)
	-- Hyphenate the affixes and apply any affix mappings.
	make_part_into_affix(data.prefix, data.lang, data.sc, "prefix")
	make_part_into_affix(data.suffix, data.lang, data.sc, "suffix")
	track_wrong_affix_type("confix", data.prefix, "prefix")
	track_wrong_affix_type("confix", data.base, nil)
	track_wrong_affix_type("confix", data.suffix, "suffix")
	-- The prefix category sorts under the base if present, else under the suffix.
	local prefix_sort_base
	if data.base and data.base.term then
		prefix_sort_base = strip_diacritics_no_links(data.base.lang, data.base.term)
	elseif data.suffix.term then
		prefix_sort_base = strip_diacritics_no_links(data.suffix.lang, data.suffix.term)
	end
	-- Format the parts as links and collect the categories.
	local formatted = {}
	local categories = {}
	formatted[#formatted + 1] = export.link_term(data.prefix, data)
	insert_affix_category(categories, data.pos, "prefix", data.prefix, data.sort_key, prefix_sort_base)
	if data.base then
		formatted[#formatted + 1] = export.link_term(data.base, data)
	end
	formatted[#formatted + 1] = export.link_term(data.suffix, data)
	-- FIXME, should we be specifying a sort base here?
	insert_affix_category(categories, data.pos, "suffix", data.suffix)
	return export.join_formatted_parts { data = data, parts_formatted = formatted, categories = categories }
end
--[==[
Implementation of {{tl|infix}}.
'''WARNING''': This destructively modifies both `data` and `.base` and `.infix`.
]==]
function export.show_infix(data)
	data.pos = pluralize(data.pos or default_pos)
	canonicalize_part(data.base, data.lang, data.sc)
	-- Hyphenate the infix and apply any affix mappings.
	make_part_into_affix(data.infix, data.lang, data.sc, "infix")
	track_wrong_affix_type("infix", data.base, nil)
	track_wrong_affix_type("infix", data.infix, "infix")
	-- Format the base and infix as links, in that order.
	local formatted = {}
	formatted[#formatted + 1] = export.link_term(data.base, data)
	formatted[#formatted + 1] = export.link_term(data.infix, data)
	-- Collect the categories.
	-- FIXME, should we be specifying a sort base here?
	local categories = {}
	insert_affix_category(categories, data.pos, "infix", data.infix)
	return export.join_formatted_parts { data = data, parts_formatted = formatted, categories = categories }
end
--[==[
Implementation of {{tl|prefix}}.
'''WARNING''': This destructively modifies both `data` and the structures within `.prefixes`, as well as `.base`.
]==]
function export.show_prefix(data)
	data.pos = pluralize(data.pos or default_pos)
	canonicalize_part(data.base, data.lang, data.sc)
	-- Hyphenate the prefixes and apply any affix mappings.
	for _, prefix in ipairs(data.prefixes) do
		make_part_into_affix(prefix, data.lang, data.sc, "prefix")
	end
	for _, prefix in ipairs(data.prefixes) do
		track_wrong_affix_type("prefix", prefix, "prefix")
	end
	track_wrong_affix_type("prefix", data.base, nil)
	-- The first prefix's category sorts under whatever part follows it: the
	-- second prefix when there is one, otherwise the base.
	local first_sort_base = nil
	local follower = data.prefixes[2] or data.base
	if follower then
		first_sort_base = ine(follower.term) or ine(follower.alt)
		if first_sort_base then
			first_sort_base = strip_diacritics_no_links(follower.lang, first_sort_base)
		end
	end
	-- Format the parts as links and collect the categories.
	local formatted = {}
	local categories = {}
	for i, prefix in ipairs(data.prefixes) do
		formatted[#formatted + 1] = export.link_term(prefix, data)
		insert_affix_category(categories, data.pos, "prefix", prefix, data.sort_key, i == 1 and first_sort_base or nil)
	end
	if data.base then
		formatted[#formatted + 1] = export.link_term(data.base, data)
	else
		formatted[#formatted + 1] = ""
	end
	return export.join_formatted_parts { data = data, parts_formatted = formatted, categories = categories }
end
--[==[
Implementation of {{tl|suffix}}.
'''WARNING''': This destructively modifies both `data` and the structures within `.suffixes`, as well as `.base`.
]==]
function export.show_suffix(data)
	data.pos = pluralize(data.pos or default_pos)
	canonicalize_part(data.base, data.lang, data.sc)
	-- Hyphenate the suffixes and apply any affix mappings.
	for _, suffix in ipairs(data.suffixes) do
		make_part_into_affix(suffix, data.lang, data.sc, "suffix")
	end
	track_wrong_affix_type("suffix", data.base, nil)
	for _, suffix in ipairs(data.suffixes) do
		track_wrong_affix_type("suffix", suffix, "suffix")
	end
	-- Format the parts as links: the base (or an empty placeholder) first,
	-- then each suffix in order.
	local formatted = {}
	if data.base then
		formatted[#formatted + 1] = export.link_term(data.base, data)
	else
		formatted[#formatted + 1] = ""
	end
	for _, suffix in ipairs(data.suffixes) do
		formatted[#formatted + 1] = export.link_term(suffix, data)
	end
	-- Collect the categories.
	local categories = {}
	for _, suffix in ipairs(data.suffixes) do
		-- FIXME, should we be specifying a sort base here?
		insert_affix_category(categories, data.pos, "suffix", suffix)
		-- Suffixes whose part of speech mentions "patronym" additionally
		-- categorize the term as a patronymic.
		if suffix.pos and rfind(suffix.pos, "patronym") then
			categories[#categories + 1] = "patronymics"
		end
	end
	return export.join_formatted_parts { data = data, parts_formatted = formatted, categories = categories }
end
return export
mdstvxnr2kw23xqtqvre3ecwzj3cjlv
উইকিঅভিধান:আলোচনাসভা/সংগ্রহশালা ১
4
22345
507810
177298
2026-04-14T11:41:25Z
CommonsDelinker
39
Logo_for_the_beta_feature_FileExporter.svg কে [[চিত্র:Logo_for_FileExporter.svg]] দিয়ে প্রতিস্থাপন করা হয়েছে, কারণ: [[:c:COM:FR|File renamed]]: [[:c:COM:FR#FR3|Criterion 3]] (obvious error) · Has been a default feature since August 2020।
507810
wikitext
text/x-wiki
__কোনসম্পাদনাঅনুচ্ছেদনয়__
__কোন_নতুন_অনুচ্ছেদের_সংযোগ_নয়__
== উইকিঅভিধানের প্রজেক্ট নেমস্পেস ==
উইকিঅভিধানের প্রজেক্ট নেমস্পেস, অর্থাৎ প্রকল্প পাতাগুলোর নেমস্পেস আসছে "Wiktionary", কিন্তু যা আসা উচিৎ বাংলা ভাষায় "উইকিঅভিধান"। তাই বাগজিলায় এ সংক্রান্ত সমস্যাটির জন্য একটি বাগ ফাইল করা হয়েছে। আপনারা [https://bugzilla.wikimedia.org/show_bug.cgi?id=21432 এখানে] (21432) গিয়ে বাগটি সমাধানের জন্য ভোট করতে পারেন। কিন্তু বাগজিলায় মনে হয় ভোটে কাজ হবে না। দেখি অপেক্ষা করা ছাড়া এখন কিছু দেখছি না। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৫:০৬, ১০ নভেম্বর ২০০৯ (UTC)
:আমাদের এই সমস্যাটি মাত্র পনেরো মিনিট আগে ফিক্সড হয়েছে। সবাইকে ধন্যবাদ। সেই সাথে সুসংবাদ হচ্ছে, আগে আমাদের প্রধান পাতায় ব্রাউজ করলে ব্রাউজার ট্যাবে ইংরেজিতে সাইট টাইটেল Wiktionary প্রদর্শন করতো, যা উইকিমিডিয়া-টেক আইআরসি চ্যানেলে কন্টাক্ট করে মূল বাগের সাথে সাথে শেষে বলে এটাও ঠিক করা গেছে। ব্রাউজার ট্যাবে এখন '''উইকিঅভিধান'''-ই প্রদর্শন করছে। এমনকি পৃষ্ঠার নিচের বৃত্তান্তের পাশে লেখা উইকশনারিটাও। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৩:৪৩, ১০ ডিসেম্বর ২০০৯ (UTC)
== উইকিঅভিধানের লোগো ==
[[image:Wiktionary bn logo.png|135x155px|right]]
উইকিঅভিধানের জন্য এই লোগোটি আপলোড করেছি। ঠিক আছে কী না সবাইকে দেখে দেওয়ার অনুরোধ। উল্লেখ্য মেটাতে যে দুটো লোগো নিয়ে ভোটাভুটি চলছে এটি তার একটি অপর লোগোটি এটির চেয়ে পরিচিত হলেও এটি বাছাই করার কারণ এটিতে একটি বাংলা অক্ষর "উ" আছে, যা অন্যটাতে নেই। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ২১:১৬, ১৬ জানুয়ারি ২০১০ (UTC)
:কোন পাতায় লগো নিয়ে আলোচনা হচ্ছে তা উল্লেখ করেননি। অনুগ্রহ করে তা দিন। আর এ লগোটি কি চূড়ান্ত হয়েছে? যদি না হয়ে থাকে তাহলে এটি ব্যবহার্য নয়। উইকি প্রকল্পগুলোর লগো সব একই রকমের হয় এবং একই লগো সব ভাষার প্রকল্পে ব্যবহার করা হয়। শুধু প্রকল্পের নাম এবং স্লোগানটি বিভিন্ন ভাষায় অনূদিত হবে।--[[ব্যবহারকারী:Bellayet|Bellayet]] ০৯:১৫, ১৭ জানুয়ারি ২০১০ (UTC)
:না, লোগোটি এখনো চূড়ান্ত হয়নি তবে এটি চূড়ান্ত হবার যথেষ্ট সম্ভাবনা আছে। দুটোর ভোটই প্রায় সমান। আর উইকিঅভিধান কমিউনিটি চাইলে এ লোগোটিও ব্যবহার করতে পারে। কারণ মেটাতে বলা আছে, "Finally, each of Wiktionary's language editions will hold their own vote on whether to approve or reject the winning logo. If 60% of the Wiktionaries approve of the logo, it will be applied to all the Wiktionaries. Otherwise, this logo contest will have no effect, and each wiki will continue to use its current logo." যেহেতু ভোটে বিজিত হবে এই দুটো লোগোর মধ্যে একটি-ই, তাই আমরা চাইলে এটি ব্যবহার করতে পারি। উইকিঅভিধান পরিবার অ্যাপ্রুভ করলে মেটাতে রিকোয়েস্টসহ বাগজিলায় বাগফাইল করা হবে। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১২:১৩, ১৯ জানুয়ারি ২০১০ (UTC)
::ভোট শেষ হবে এ মাসের ৩১ তারিখ। আমরা চাইলে এ কদিন অপেক্ষাও করতে পারি। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৯:৩৯, ১৯ জানুয়ারি ২০১০ (UTC)
:::মেটার ভোট শেষ হয়েছে আমরা যে লোগোটি নির্বাচন করেছি সেটাই বেশি ভোট পেয়েছে (৫৫৮টি)। অপর লোগোটি পেয়েছে ৪৫৫টি। আমার মনে হয় সব বিচারে এটাকে আমরা উইকিঅভিধানের লোগো হিসেবে নির্বাচন করতে পারি। সবার আশু মতামত আশা করছি। লোগো লোকালাইজেশনটা জরুরী। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৬:০৪, ১ ফেব্রুয়ারি ২০১০ (UTC)
::চমৎকার! তবে লগো পরিবর্তন নিয়ে তাড়াহুড়োর দরকার নাই। লগো তৈরির ব্যাপারে এবং পরিবর্তনের ব্যাপারে মেটার ইন্সট্রাকশন ফলো করুন। সাথে সাথে অন্যান্য উইকশনারি কখন লগো পরিবর্তন করছে সে সময়টা ফলো করুন। এমন কিছু যেন না হয় যে তাড়াহুড়ো করতে গিয়ে বাংলা উইকশনারি অসংলগ্ন কিছু একটা করে ফেলেছে যা অন্যান্য উইকশনারি করেনি। সময় নিন। আবারও বলছি তাড়াহুড়ো করবেন না। আর আপনি যে লগোটি করেছেন তার বিস্তারিত ডকুমেন্টেশন করুন। এতে কোন লাইনে কোন ফন্ট ব্যবহার করেছেন, ইমেজের সাইজ কত এসব তথ্য। এটি যদি পরবর্তীতে আরও বড় করে তৈরি করতে হয় তাহলে যেন যে কেউ সহজেই এর উচ্চ রেজুলুশনের সংস্করণ তৈরি করতে পারেন।--[[ব্যবহারকারী:Bellayet|Bellayet]] ০৫:৪৫, ২ ফেব্রুয়ারি ২০১০ (UTC)
:::আমি তাড়াহুড়ো করছি না। দয়া করে দ্রুত কাজ করার সাথে তাড়াহুড়োকে গুলিয়ে ফেলবেন না। উপরে আমি মেটার ইন্সট্রাকশন বলেছি। যেহেতু কেউ-ই ৬০% ভোট পায় নি, তাই আমাদের অপশন আছে দুটোর যেকোনো একটি ব্যবহার করার। বিস্তারিত ইন্সট্রাকশন আপনি লোগো প্রোপোসাল পেইজ ও ভোটিং পেইজ-এ পাবেন। লোগোটির স্পেসিফিকেশনে আসি। প্রথম লাইনে ব্যবহার করা হয়েছে ধানসিড়ি এমজে ফন্ট (103pt) ও দ্বিতীয় লাইনে মেঘনা এমজে ফন্ট (60pt)। ইমেজ রেজ্যুলিউশন হচ্ছে ৫৬৩ x ৪৬৩। এটার এসভিজি ভার্সন তৈরি করার পরিকল্পনা আছে, তাহলে একটা বড় সাইজ পাওয়া যাবে। আর লোগোটি ঠিক আছে কি না, তা জানা খুবই সহজ। মেটার লোগো রিকোয়েস্ট পাতায় আবেদন করলে, ওঁরাই দেখে তা বলবে। সমস্যা থাকলে সেটাও শোধরানো যাবে। কিন্তু সেটা করতে হলে প্রাথমিক ভাবে আমাদের কমিউনিটিকে একমত হতে হবে, যে আমরা এরকম চাই। ওঁরা দেখবে ফন্ট ও ফন্ট সাইজের সামঞ্জস্যতা, আর কিছু না। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ০৭:০৮, ২ ফেব্রুয়ারি ২০১০ (UTC)
::আপনি যে দুটো ফন্টের নাম বললেন সে ফন্ট দুটো কি ওপেন সোর্স ফন্ট নাকি চোরাই ফন্ট। চোরাই বলছি এ কারণে যে MJ ফন্ট কপিরাইটেড যা বিজয়ের সাথে পাওয়া যায় এবং আর বিজয় এবং MJ ফন্ট ব্যবহার করতে অবশ্যই কিনে নিতে হয়। যদি চোরাই হয় তাহলে আপনি এ ফন্ট দুটো ব্যবহার করা উচিত নয়। উইকিমিডিয়ার লগোগুলো উইকিমিডিয়ার ট্রেডামার্ক করা ফলে এতে কোনো চোরাই ফন্ট ব্যবহার করা যাবে না আইনগত কারণে। সম্ভাব্য সকল ঝামেলা এড়াতে উইকিমিডিয়ার সব কাজেই ওপেন সোর্স ফন্ট ব্যবহার করা হয়। ফলে লগোর ফন্ট পরিবর্তনের প্রয়োজন আছে বলে আমি মনে করি। দ্রুত কাজ করলে ভুল হতো না, তাড়াহুড়ো বলেই এই ভুল হয়েছে।--[[ব্যবহারকারী:Bellayet|Bellayet]] ০৯:৩৪, ২ ফেব্রুয়ারি ২০১০ (UTC)
:::আমার পিসিতে বিজয় ও অভ্র দুটোই ইন্সটল করা। অভ্র দিয়েই আমি উইকিঅভিধান ও উইকিপিডিয়ায় লিখি। তাই তাড়াহুড়োর কারণে আমি ওপেন সোর্স ছেড়ে বিজয় ধরেছি—এটা গ্রহণযোগ্য যুক্তি হলো না। যাহোক এসব অর্থহীন কথাবার্তা। এ ফন্টদুটো পছন্দ হওয়ার কারণেই ব্যবহার করা। তবে হ্যাঁ, ওপেন সোর্স ফন্ট ব্যবহার করাটা যুক্তিসঙ্গত। আমার বিজয়টি কেনা নয়, আরেকজনের কেনাটা আমি ইন্সটল করেছি। সে অর্থে ক্রীত না চলেও চোরাই বলা যায় কিনা জানা নেই (হয়তো এটাকে চোরাই বলে)। যা হোক, ওপেন সোর্স ফন্ট দিয়ে শীঘ্রই একটা লোগো দিচ্ছি। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১২:৫০, ২ ফেব্রুয়ারি ২০১০ (UTC)
[[image:Wiktionary logo bn.png|135x155px|right]]
:::ওপেন সোর্সড ফন্ট দিয়ে এই নতুন লোগোটি আপলোড করা হলো। ফন্ট হিসেবে ১ম লাইনে বেনসেনহ্যান্ডরাইটিং এবং ২য় লাইনে একুশে লোহিত ব্যবহার করা হয়েছে। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৬:২৫, ২ ফেব্রুয়ারি ২০১০ (UTC)
::লগো পরিবর্তন বিষয়টি নিয়ে বিস্তারিত জানতে যে সময়টুকুর দরকার তা না নিয়ে কাজ করা তাড়াহুড়ো নয় কি? আপনি প্রথমে আপনার আইডিয়াটা আলোচনার জন্য তুলতে পারতেন। অভিজ্ঞ কাউকে জিজ্ঞাসা করতে পারতেন। তাতে সময় কিছু বেশি লাগলেও, ভুলটা হয়তো এড়ানো যেতো। এখানে ওপেনসোর্স বা বিজয় মূল বিষয় নয়। আপনি ব্যক্তিগতভাবে কি ব্যবহার করবেন তাও আমার কাছে গৌণ। বিষয়টি হলো কপিরাইটেড লগো ব্যবহার। বিজয় বা ফন্টটি যদি আপনার কেনাও হতো, তারপরেও আপনি তা উইকিমিডিয়ার কোন লগো তৈরিতে ব্যবহার করতে পারেন না। কারণ ব্যবহৃত ফন্ট আপনার কেনা, উইকিমিডিয়ার নয়। আর লগোটি উইকিমিডিয়ার ট্রেডমার্ককৃত। তাই কপিরাইটকৃত ফন্ট ব্যবহারে ভুল আপনি করলেও, দায় পড়বে উইকিমিডিয়ার ঘাড়ে। আর চোরাই শব্দে আপনি মন খারাপ করবেন না, কারণ চোরাই বলতে আমি ঐ সফটওয়্যারগুলোকেই নির্দেশ করেছি যা আমরা না কিনে অবৈধ ভাবে ব্যবহার করি। এমন সফটওয়্যার আমি নিজেও অনেক ব্যবহার করি। তবে পরের লগোটা ভাল হয়েছে। আগের লগোটার চাইতেও দেখতে ভাল লাগছে। লগো তৈরির বিস্তারিত মাপ এবং ফন্ট লগো পাতার বর্ণনায় লেখার অনুরোধ করছি।--[[ব্যবহারকারী:Bellayet|Bellayet]] ১৭:২৩, ২ ফেব্রুয়ারি ২০১০ (UTC)
:::মূল্যবান পরামর্শের জন্য ধন্যবাদ। লোগোর মাপ ও ফন্টের নাম চিত্রের পাতায় যোগ করা হয়েছে। আপনাদের সমর্থন থাকলে বর্তমান লোগো চেঞ্জ করে এই লোগোটি দেওয়ার জন্য আবেদন করা হবে। তাই ইয়েস/নো টাইপের মতামত জানানোর অনুরোধ। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৮:১০, ২ ফেব্রুয়ারি ২০১০ (UTC)
::আমি বেশ কয়েকটি ভাষার উইকশনারি ঘুরে দেখলাম, কেউই এখনও এ লগোটি ব্যবহার শুরু করে নি। আর ভোটিং এ বলা হয়েছে "If 60% of the Wiktionaries approve of the logo, it will be applied to all the Wiktionaries. Otherwise, this logo contest will have no effect, and each wiki will continue to use its current logo."। এর মানে বর্তমান যে লগো আছে তাই চলবে, বদল হবে না। যেহেতু নতুন লগোর এখনও প্রচলন শুরু হয়নি ফলে আপাতত আমি নতুন লগোতে পরিবর্তনের পক্ষে নই। বড় বড় উইকিতে এ লগোর প্রচলন হলে তখন এ উইকিতেও তা পরিবর্তন করা যাবে। আপাতত বর্তমান লগোকেই লোকালাইজ করা যেতে পারে। তেলেগু, তামিল ভাষাগুলোতে বর্তমান লগোকেই লোকালাইজ করা হয়েছে, তবে দেবনগরী স্ক্রিপ্ট থেকে আসা ভাষাগুলো যেমন হিন্দী, নেপালী, মারাঠী ভাষার উইকিতে এখনও ইংরেজী লগো দিয়েই চালিয়ে যাচ্ছে।--[[ব্যবহারকারী:Bellayet|Bellayet]] ০৪:৪৯, ৩ ফেব্রুয়ারি ২০১০ (UTC)
:::ব্যবহার এখনো শুরু হয় নি বটে, তবে লোগেটি নিয়ে কাজ শুরু হয়েছে। কমন্স গ্রাফিক ল্যাব লোগোর SVG ভার্শন এবং মেটাতে লোকালাইজেশন টেক্সট সংগ্রহের কাজও শুরু হয়েছে। আমি সেখানে বাংলা লোকালাইজেনশন টেক্সটি যোগ করেছি। [http://meta.wikimedia.org/wiki/Wiktionary/logo/refresh/localization_text এখানে] পাতাটি আপনারা দেখতে পারেন। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ০৬:১৬, ৪ ফেব্রুয়ারি ২০১০ (UTC)
:::আরেকটি ব্যাপার জানিয়ে রাখি। লোগোটির বাম পাশের লেখাযুক্ত পাতার লেখাগুলো ব্লারি করে দেওয়ার ব্যাপারে আলোচনা চলছে। এ কাজটি হয়ে গেলে নতুন এটার-ই নতুন একটা ভার্শন আসবে। তখন সেটিতে আমাদের লোকালাইজড টেক্সট বসাতে হবে। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ০৬:৪৭, ৪ ফেব্রুয়ারি ২০১০ (UTC)
::চমৎকার!! নিয়মিত খোঁজ রাখুন, অন্যান্য উইকিতে এটির ব্যবহার শুরু হলেই বাংলাতেও পরিবর্তন করে দেওয়া যাবে।--[[ব্যবহারকারী:Bellayet|Bellayet]] ১৬:১৯, ৪ ফেব্রুয়ারি ২০১০ (UTC)
== মিডিয়া আপলোড লিংক নিষ্ক্রিয় বিষয়ে ==
বাংলা উইকিঅভিধানের পেইজের বাম পাশের মিডিয়া আপলোড লিংক নিষ্ক্রিয় করার ব্যাপারে মতামত চাচ্ছি। কারণ এই প্রজেক্টে নন-ফ্রি মিডিয়ার ব্যবহারের প্রয়োজন নেই বললেই চলে। কমন্সে প্রায় সব মিডিয়াই এখন কম-বেশি পাওয়া যায়। তাছাড়া কোনো কারণে আপলোডের প্রয়োজন হলে তা প্রশাসকদের জন্য উন্মুক্ত থাকবে। উল্লেখ্য ইংরেজি উইকশনারিতেও এটা কমন্সে রিডিরেক্ট করে দেওয়া, এবং সেখানে এজন্য কোনো সমস্যা হয় না। আমরা এটা এখানেও বাস্তবায়ন করতে পারি। সবার মন্তব্য আশা করছি। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৯:৪৪, ১৯ জানুয়ারি ২০১০ (UTC)
== নতুন নেমস্পেস: উইকিসরাস ==
সমার্থক শব্দগুলো একটি নির্দিষ্ট নেমস্পেসের আওতায় আনার জন্য বাগজিলায় একটি নতুন বাগফাইল করা হয়েছে। এই নেমস্পেসটি পাওয়া গেলে সমার্থকশব্দগুলো আলাদা একটি নেমস্পেস পাবে, এবং নির্দিষ্ট করা সহজ হবে। বর্তমানে এ সংক্রান্ত ভুক্তিগুলো প্রধান নেমস্পেসেই আছে। এ সংক্রান্ত বাগটি [https://bugzilla.wikimedia.org/show_bug.cgi?id=22342 এখানে] (22342) পাওয়া যাবে। আপনারা চাইলে ভোট করতে পারেন। ধন্যবাদ। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৬:৩৫, ১ ফেব্রুয়ারি ২০১০ (UTC)
: আসলে বাগটি ইস্যু করার আগে আলোচনা করার উচিত ছিল। নেমস্পেসের নাম 'উইকিসরাস'। উইকিসরাস শব্দটি দিয়ে আসলে কি বোঝায়, তা কি দেখেই বোঝা যায়? আমি তো উইকিসরাস দিয়ে কি বোঝাচ্ছে তা দেখেই বুঝতে পারছি না। আরও সহজ কিছু প্রস্তাব করা যায় কিনা ভেবে দেখার অনুরোধ করছি। আর কোন বাগ ইস্যু করার ব্যাপারে তাড়াহুড়ো না করার অনুরোধও রাখছি।--[[ব্যবহারকারী:Bellayet|Bellayet]] ০৫:৩৭, ২ ফেব্রুয়ারি ২০১০ (UTC)
::আমার কাছে এটা তাড়াহুড়ো মনে হয় না। অনেক আগে থেকেই প্রধান পাতায় উইকিসরাস শব্দটি দিয়ে কি বোঝানো হয় তাঁর ব্যাখ্যা আছে। অভিধানমূলক প্রকল্প হিসেবে শুরুতে এ নেমস্পেসটি থাকা উচিত ছিলো, যা হয় নি। এ বিষয়ে ভুক্তি, বিষয়শ্রেণীও আছে। তাছাড়া উইকি সমার্থকশব্দকোষকে বাংলায় উইকিসরাস করাটা ইংরেজির-ই বাংলা প্রতিবর্ণীকরণ বলতে বলতে পারেন। সমার্থকশব্দকোষ বা "সমার্থক শব্দ" নেমস্পেস হিসেবে বড়, তাই এটাকে শর্টকাট করে ইংরেজির সাথে তাল রেখে উইকিসরাস করাটা অনেক আগে থেকেই। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ০৬:৪২, ২ ফেব্রুয়ারি ২০১০ (UTC)
:::আরেকটি সুসংবাদ। উইকিসরাস নেমস্পেসটির জন্য করা আবেদনটি কিছুক্ষণ আগে সফল হলো। আমরা আমাদের সমার্থশব্দকোষ-মূলক প্রথম ভুক্তিটি ([[উইকিসরাস:বস্তু]]) তার নিজস্ব নামস্থানেই স্থান পাচ্ছে....। নামস্থানটি সবাইকে সমৃদ্ধ করার অনুরাধ আর সবাইকে ধন্যবাদ। — [[User:Wikitanvir|তানভির]] <big>•</big> [[User talk:Wikitanvir|আলাপ]] <big>↑</big> [[Special:Contributions/Wikitanvir|অবদান]] <big>↓</big> ১৮:৪২, ১৬ এপ্রিল ২০১০ (UTC)
== [[MediaWiki talk:Transli.js#Narayam]]==
Hello, I've posted a proposal to improve the input method/transliteration tool via Narayam in the above discussion. Please comment. Thanks, [[User:Nemo bis|Nemo bis]] ([[User talk:Nemo bis|আলাপ]]) ১৩:০৩, ৯ সেপ্টেম্বর ২০১২ (ইউটিসি)
== Wikidata is getting close to a first roll-out ==
[[File:Wikimedia_Foundation_RGB_logo_with_text.svg|80px|right]]
(Apologies if this message isn't in your language.)
As some of you might already have heard Wikimedia Deutschland is working on a new Wikimedia project. It is called [[m:Wikidata]]. The goal of Wikidata is to become a central data repository for the Wikipedias, its sister projects and the world. In the future it will hold data like the number of inhabitants of a country, the date of birth of a famous person or the length of a river. These can then be used in all Wikimedia projects and outside of them.
The project is divided into three phases and "we are getting close to roll-out the first phase". The phases are:
# language links in the Wikipedias (making it possible to store the links between the language editions of an article just once in Wikidata instead of in each linked article)
# infoboxes (making it possible to store the data that is currently in infoboxes in one central place and share the data)
# lists (making it possible to create lists and similar things based on queries to Wikidata so they update automatically when new data is added or modified)
It'd be great if you could join us, test the [http://wikidata-test.wikimedia.de demo version], provide feedback and take part in the development of Wikidata. You can find all the relevant information including an [[m:Wikidata/FAQ|FAQ]] and sign-up links for our on-wiki newsletter on [[m:Wikidata|the Wikidata page on Meta]].
For further discussions please use [[m:Talk:Wikidata|this talk page]] (if you are uncomfortable writing in English you can also write in your native language there) or point [[m:User_talk:Lydia Pintscher (WMDE)|me]] to the place where your discussion is happening so I can answer there.
--[[m:User:Lydia Pintscher (WMDE)|Lydia Pintscher]] ১৩:০৮, ১০ সেপ্টেম্বর ২০১২ (ইউটিসি)
<small>Distributed via [[m:Global message delivery|Global message delivery]]. (Wrong page? [[m:Distribution list/Global message delivery|Fix here]].)</small>
<!-- EdwardsBot 0248 -->
== Upcoming software changes - please report any problems ==
[[File:Wikimedia_Foundation_RGB_logo_with_text.svg|80px|right]]
<div dir=ltr>
''(Apologies if this message isn't in your language. Please consider translating it)''
All Wikimedia wikis - including this one - will soon be upgraded with new and possibly disruptive code. This process starts today and finishes on October 24 (see the [[mw:MediaWiki_1.21/Roadmap|upgrade schedule]] & [[mw:MediaWiki 1.21/wmf2|code details]]).
Please watch for problems with:
* revision diffs
* templates
* CSS and JavaScript pages (like user scripts)
* bots
* PDF export
* images, video, and sound, especially scaling sizes
* the CologneBlue skin
If you notice any problems, please [[mw:How to report a bug|report problems]] at [[mw:Bugzilla|our defect tracker site]]. You can test for possible problems at [https://test2.wikipedia.org test2.wikipedia.org] and [https://mediawiki.org/ mediawiki.org], which have already been updated.
Thanks! With your help we can find problems fast and get them fixed faster.
[[mw:User:Sharihareswara (WMF)|Sumana Harihareswara, Wikimedia Foundation Engineering Community Manager]] ([[mw:User talk:Sharihareswara (WMF)|talk]]) ০২:৪৩, ১৬ অক্টোবর ২০১২ (ইউটিসি)
P.S.: For the regular, smaller MediaWiki updates every two weeks, please [[mw:MediaWiki_1.21/Roadmap|watch this schedule]].
<small>Distributed via [[m:Global message delivery|Global message delivery]]. (Wrong page? [[m:Distribution list/Global message delivery|Fix here]].)</small>
</div>
<!-- EdwardsBot 0278 -->
== Fundraising localization: volunteers from outside the USA needed ==
''Please translate for your local community''
Hello All,
The Wikimedia Foundation's Fundraising team have begun our 'User Experience' project, with the goal of understanding the donation experience in different countries outside the USA and enhancing the localization of our donation pages. I am searching for volunteers to spend 30 minutes on a Skype chat with me, reviewing their own country's donation pages. It will be done on a 'usability' format (I will ask you to read the text and go through the donation flow) and will be asking your feedback in the meanwhile.
The only pre-requisite is for the volunteer to actually live in the country and to have access to at least one donation method that we offer for that country (mainly credit/debit card, but also real-time banking like IDEAL, E-wallets, etc...) so we can do a live test and see if the donation goes through. ''All volunteers will be reimbursed of the donations that eventually succeed'' (and they will be low amounts, like 1-2 dollars)
By helping us you are actually helping thousands of people to support our mission of free knowledge across the world. Please sign up and help us with our 'User Experience' project! :)
If you are interested (or know of anyone who could be) please email ppena@wikimedia.org. All countries needed (excepting USA)!
Thanks!<br />
[[wmf:User:Ppena|Pats Pena]]<br />
Global Fundraising Operations Manager, Wikimedia Foundation
Sent using [[m:Global message delivery|Global message delivery]], ১৬:৪৮, ১৭ অক্টোবর ২০১২ (ইউটিসি)
<!-- EdwardsBot 0280 -->
== Localise your wiki logo ==
[[File:Wiktionary-logo.svg|thumb|The official localisable Wiktionary logo]]
Hello! It was noted that Wiktionary in this language has not yet adopted a localised/translated logo: it's really a pity for a dictionary project!<br />
We are trying to help Wiktionaries adopt a locally-adapted logo, by taking the technical difficulties on us. What we need from you is just the preferred translation of the name and motto, "Wiktionary" (if translated) and "The free dictionary": you can add them to [[m:User:Cbrown1023/Logos#Wiktionary|the logo list]], by editing it directly or commenting on the talk page; you can also add a note if you don't want the localised logo.<br />
Of course, you can also create the logo and make the necessary requests on [[m:Bugzilla|bugzilla]] yourself, if you prefer so.<br />
Feel free to translate this message and to move/copy/forward it where appropriate; you can also reply on [[m:User talk:Nemo_bis|my talk]]. Thanks, [[m:User:Nemo_bis|Nemo]] ১৫:৩১, ৩ নভেম্বর ২০১২ (ইউটিসি)
<!-- EdwardsBot 0290 -->
== Localise your wiki logo ==
[[File:Wiktionary-logo.svg|thumb|The official localisable Wiktionary logo]]
Hello! It was noted that Wiktionary in this language has not yet adopted a localised/translated logo: it's really a pity for a dictionary project!<br />
We are trying to help Wiktionaries adopt a locally-adapted logo, by taking the technical difficulties on us. What we need from you is just the preferred translation of the name and motto, "Wiktionary" (if translated) and "The free dictionary": you can add them to [[m:User:Cbrown1023/Logos#Wiktionary|the logo list]], by editing it directly or commenting on the talk page; you can also add a note if you don't want the localised logo.<br />
Of course, you can also create the logo and make the necessary requests on [[m:Bugzilla|bugzilla]] yourself, if you prefer so.<br />
Feel free to translate this message and to move/copy/forward it where appropriate; you can also reply on [[m:User talk:Nemo_bis|my talk]]. Thanks, [[m:User:Nemo_bis|Nemo]] ১৫:৫৫, ৩ নভেম্বর ২০১২ (ইউটিসি)
<!-- EdwardsBot 0291 -->
== Be a Wikimedia fundraising "User Experience" volunteer! ==
Thank you to everyone who volunteered last year on the Wikimedia fundraising 'User Experience' project. We have talked to many different people in different countries and their feedback has helped us immensely in restructuring our pages. If you haven't heard of it yet, the 'User Experience' project has the goal of understanding the donation experience in different countries (outside the USA) and enhancing the localization of our donation pages.
I am (still) searching for volunteers to spend some time on a Skype chat with me, reviewing their own country's donation pages. It will be done on a 'usability' format (I will ask you to read the text and go through the donation flow) and will be asking your feedback in the meanwhile.
The only pre-requisite is for the volunteer to actually live in the country and to have access to at least one donation method that we offer for that country (mainly credit/debit card, but also real time banking like IDEAL, E-wallets, etc...) so we can do a live test and see if the donation goes through. **All volunteers will be reimbursed of the donations that eventually succeed (and they will be very low amounts, like 1-2 dollars)**
By helping us you are actually helping thousands of people to support our mission of free knowledge across the world. If you are interested (or know of anyone who could be) please email ppena@wikimedia.org. All countries needed (excepting USA)!!
Thanks!
[[m:User:Ppena (WMF)|Pats Pena]]<br/>
Global Fundraising Operations Manager, Wikimedia Foundation
: Sent using [[m:Global message delivery|Global message delivery]], ২০:৩৮, ৮ জানুয়ারি ২০১৩ (ইউটিসি)
<!-- EdwardsBot 331 -->
== Wikimedia sites to move to primary data center in Ashburn, Virginia. Read-only mode expected. ==
(Apologies if this message isn't in your language.) Next week, the Wikimedia Foundation will transition its main technical operations to a new data center in Ashburn, Virginia, USA. This is intended to improve the technical performance and reliability of all Wikimedia sites, including this wiki. There will be some times when the site will be in read-only mode, and there may be full outages; the current target windows for the migration are January 22nd, 23rd and 24th, 2013, from 17:00 to 01:00 UTC (see [http://www.timeanddate.com/worldclock/fixedtime.html?msg=Wikimedia+data+center+migration&iso=20130122T17&ah=8 other timezones] on timeanddate.com). More information is available [https://blog.wikimedia.org/2013/01/19/wikimedia-sites-move-to-primary-data-center-in-ashburn-virginia/ in the full announcement].
If you would like to stay informed of future technical upgrades, consider [[m:Tech/Ambassadors|becoming a Tech ambassador]] and [https://lists.wikimedia.org/mailman/listinfo/wikitech-ambassadors joining the ambassadors mailing list]. You will be able to help your fellow Wikimedians have a voice in technical discussions and be notified of important decisions.
Thank you for your help and your understanding.
[[:m:user:guillom|Guillaume Paumier]], via the [[:m:Global message delivery|Global message delivery system]] <small>([[:m:Distribution list/Global message delivery|wrong page? You can fix it.]])</small>. ১৫:০৩, ১৯ জানুয়ারি ২০১৩ (ইউটিসি)
<!-- EdwardsBot 0338 -->
== Help turn ideas into grants in the new IdeaLab ==
<div class="mw-content-ltr">
[[File:Wikimedia_Foundation_RGB_logo_with_text.svg|80px|right]]
''I apologize if this message is not in your language. Please help translate it.''
*Do you have an idea for a project to improve this community or website?
*Do you think you could complete your idea if only you had some funding?
*Do you want to help other people turn their ideas into project plans or grant proposals?
Please join us in the [[m:Grants:IdeaLab|IdeaLab]], an incubator for project ideas and Individual Engagement Grant proposals.
The Wikimedia Foundation is seeking new ideas and proposals for Individual Engagement Grants. These grants fund individuals or small groups to complete projects that help improve this community. If interested, please submit a completed proposal by February 15, 2013. Please visit https://meta.wikimedia.org/wiki/Grants:IEG for more information.
Thanks! --[[m:User:Sbouterse (WMF)|Siko Bouterse, Head of Individual Engagement Grants, Wikimedia Foundation]] ২০:০৯, ৩০ জানুয়ারি ২০১৩ (ইউটিসি)
<small>Distributed via [[m:Global message delivery|Global message delivery]]. (Wrong page? [[m:Distribution list/Global message delivery|Correct it here]].)</small>
</div>
<!-- EdwardsBot 0344 -->
== Convert complex templates to Lua to make them faster and more powerful ==
<small>(Please consider translating this message for the benefit of your fellow Wikimedians)</small>
Greetings. As you might have seen on the [https://blog.wikimedia.org/2013/03/11/lua-templates-faster-more-flexible-pages/ Wikimedia tech blog] or the [http://lists.wikimedia.org/pipermail/wikitech-ambassadors/2013-March/000171.html tech ambassadors list], a new functionality called "Lua" is being enabled on all Wikimedia sites today. [[mw:Lua|Lua]] is a scripting language that enables you to write faster and more powerful MediaWiki templates.
If you have questions about how to convert existing templates to Lua (or how to create new ones), we'll be holding two support sessions on IRC next week: [http://www.timeanddate.com/worldclock/fixedtime.html?hour=02&min=00&sec=0&day=20&month=03&year=2013 one on Wednesday] (for Oceania, Asia & America) and [http://www.timeanddate.com/worldclock/fixedtime.html?hour=18&min=00&sec=0&day=22&month=03&year=2013 one on Friday] (for Europe, Africa & America); see [[m:IRC office hours]] for the details. If you can't make it, you can also get help at [[mw:Talk:Lua scripting]].
If you'd like to learn about this kind of events earlier in advance, consider becoming a [[m:Tech/Ambassadors|Tech ambassador]] by subscribing to the [https://lists.wikimedia.org/mailman/listinfo/wikitech-ambassadors mailing list]. You will also be able to help your fellow Wikimedians have a voice in technical discussions and be notified of important decisions.
[[:m:user:guillom|Guillaume Paumier]], via the [[:m:Global message delivery|Global message delivery system]]. ১৮:৪৫, ১৩ মার্চ ২০১৩ (ইউটিসি) <small>([[:m:Distribution list/Global message delivery|wrong page? You can fix it.]])</small>
<!-- EdwardsBot 0379 -->
== Proposal of a pronunciation recording tool ==
<div class="mw-content-ltr">
Hello, [[mw:User:Rahul21|Rahul21]], a developer, offers to develop a pronunciation recording tool for Wiktionary, helped by [[m:User:Mdale|Michael Dale]] as part of [[mw:Summer of Code 2013|GSoC]]. The tool would allow to record and add audio pronunciations to Wiktionary entries while browsing them (see [http://thread.gmane.org/gmane.org.wikimedia.wiktionary/1265 background discussion on Wiktionary-l]).
Please [[mw:User:Rahul21/Gsoc|read and comment the proposal]]!
Regards, [[m:User:Nemo_bis|Nemo]] ২২:৩৬, ৯ এপ্রিল ২০১৩ (ইউটিসি)
</div>
<!-- EdwardsBot 0402 -->
== [[m:Requests for comment/Activity levels of advanced administrative rights holders|Request for comment on inactive administrators]] ==
; নিস্ক্রিয় প্রশাসকদের জন্য মন্তব্যের অনুরোধ
<small>(আপনার সহযোগী উইকিমিডিয়ানদের সুবিধার জন্য এই বার্তাটি অনুবাদ করুন। এছাড়াও অনুগ্রহ করে [[m:Requests for comment/Activity levels of advanced administrative rights holders/Summary|প্রস্তাবটি]] অনুবাদ করতে বিবেচনা করুন।)</small>
<small>[[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message|Read this message in English]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/ast|Lleer esti mensaxe n'asturianu]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/bn|বাংলায় এই বার্তাটি পড়ুন]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/ca|Llegiu aquest missatge en català]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/da|Læs denne besked på dansk]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/de|Lies diese Nachricht auf Deutsch]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/egl|Leś cal mesag' chè in Emiliàn]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/es|Leer este mensaje en español]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/fi|Lue tämä viesti suomeksi]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/fr|Lire ce message en français]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/gl|Ler esta mensaxe en galego]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/hi|हिन्दी]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/hr|Pročitajte ovu poruku na hrvatskom]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/id|Baca pesan ini dalam Bahasa Indonesia]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/it|Leggi questo messaggio in 
italiano]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/kn|ಈ ಸಂದೇಶವನ್ನು ಕನ್ನಡದಲ್ಲಿ ಓದಿ]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/mt|Aqra dan il-messaġġ bil-Malti]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/nb|norsk (bokmål)]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/nl|Lees dit bericht in het Nederlands]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/pl|Przeczytaj tę wiadomość po polsku]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/ro|Citiți acest mesaj în română]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/ru|Прочитать это сообщение на русском]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/so|Farriintaan ku aqri Af-Soomaali]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/sr|Pročitaj ovu poruku na srpskom (Прочитај ову поруку на српском)]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/th|อ่านข้อความนี้ในภาษาไทย]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/uk|Прочитати це повідомлення українською мовою]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/vi|Đọc thông báo bằng tiếng Việt]] / [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message/zh|使用中文阅读本信息。]]</small>
হ্যালো!
দীর্ঘমেয়াদে নিষ্ক্রিয় উইকিমিডিয়ানদের প্রশাসনিক অধিকার থেকে অপসারণের বিষয়ে মেটা-উইকিতে মন্তব্যের জন্য [[m:Requests for comment/Activity levels of advanced administrative rights holders|একটি নতুন অনুরোধ আছে]]। সাধারণত, স্ট্যুয়ার্ডগণ থেকে এই প্রস্তাবটি প্রশাসকদের পর্যালোচনা প্রক্রিয়া ছাড়া উইকিতে প্রয়োগ করা হয়।
আমরা মন্তব্যের জন্য অনুরোধের আলাপ পাতাতে নিস্ক্রিয় প্রশাসকদের সরানোর জন্য পদ্ধতির সঙ্গে [[m:Talk:Requests for comment/Activity levels of advanced administrative rights holders|প্রকল্পের একটি তালিকা]] কম্পাইল করছি। যদি প্রশাসকদের নিষ্ক্রিয়তা উপর আপনার কোন নীতি থাকে তাহলে তালিকায় আপনার প্রকল্প(গুলি) নির্দ্বিধায় যুক্ত করুন।
সকল মতামত গ্রহণযোগ্য। আলোচনা যত তাড়াতাড়ি সম্ভব ২১ মে ২০১৩ (২০১৩-০৫-২১) এর মধ্যে বন্ধ করা হবে, কিন্তু প্রয়োজন হলে এই সময়সীমা বাড়ানো হবে।
ধন্যবাদ, [[m:User:Billinghurst|Billinghurst]] <small>(thanks to all the [[m:Requests for comment/Activity levels of advanced administrative rights holders/Global message|translators]]!)</small> ০৪:২১, ২৪ এপ্রিল ২০১৩ (ইউটিসি)
:<small>[[m:Global message delivery|গ্লোবাল বার্তা বিলির]] মাধ্যমে বিতরণ (ভুল পাতা? [[m:Distribution list/Global message delivery|আপনি এটি ঠিক করতে পারেন]]।)</small>
<!-- EdwardsBot 0430 -->
== [en] Change to wiki account system and account renaming ==
<div class="mw-content-ltr">
Some accounts will soon be renamed due to a technical change that the developer team at Wikimedia are making. [[m:Single User Login finalisation announcement|More details on Meta]].
<small>(Distributed via [[m:global message delivery|global message delivery]] ০৩:২০, ৩০ এপ্রিল ২০১৩ (ইউটিসি). Wrong page? [[m:Distribution list/Global message delivery|Correct it here]].)</small>
</div>
<!-- EdwardsBot 0437 -->
== [en] Change to section edit links ==
<div class="mw-content-ltr">
The default position of the "edit" link in page section headers is going to change soon. The "edit" link will be positioned adjacent to the page header text rather than floating opposite it.
Section edit links will be to the immediate right of section titles, instead of on the far right. If you're an editor of one of the wikis which already implemented this change, nothing will substantially change for you; however, scripts and gadgets depending on the previous implementation of section edit links will have to be adjusted to continue working; however, nothing else should break even if they are not updated in time.
[[m:Change to section edit links|Detailed information and a timeline]] is available on meta.
Ideas to do this date back to 2009 at least. It is often difficult to track which of several potential section edit links on the far right is associated with the correct section, and many readers and anonymous or new editors may even be failing to notice section edit links at all, since they read section titles, which are far away from the links.
<small>(Distributed via [[m:global message delivery|global message delivery]] ১৮:১০, ৩০ এপ্রিল ২০১৩ (ইউটিসি). Wrong page? [[m:Distribution list/Global message delivery|Correct it here]].)</small>
</div>
<!-- EdwardsBot 0438 -->
== উইকিঅভিধানের নতুন প্রশাসক !! ==
Pratyya Ghosh এর উইকিঅভিধানে কাজ করার আগ্রহ দেখে ভাল লাগছে। তিনি ৪ মে [[উইকিঅভিধান:প্রশাসক হওয়ার আবেদন/Pratyya Ghosh|প্রশাসক হওয়ার আবেদন করেন]] (''১৫ মার্চ ২০১৩ তে তিনি প্রথম সম্পাদনা করেন, মাত্র [http://toolserver.org/~quentinv57/sulinfo/Pratyya%20Ghosh ৯৯টি] সম্পাদনার পর তিনি কিভাবে প্রশাসক হওয়ার যোগ্য হন???'') তার আবেদনে [[Special:Contributions/Siam4319|Siam4319]] (এই আইডিটি ৪মে তৈরি হয়) সমর্থন দেয়। যেটিতে আমার কিছু সন্দেহ হচ্ছে। কারন, Siam4319 এর গ্লোবাল সম্পাদনার সংখ্যা ৩টি। যিনি তার সমর্থনে বলেন ''I'll surely support him. He is my classmate'' যেটি সম্পূর্ণভাবে [http://en.wikipedia.org/wiki/Wikipedia:Meatpuppet#Meatpuppetry Meatpuppetry এর অধীনে পড়ে] (যেটি কোনমতেই গ্রহন যোগ্য নয়)। দ্বিতীয়ত, মাত্র ৩টি সম্পাদনা করা ব্যবহারকারীর মতামত কিভাবে প্রশাসক হওয়ার আবেদনে গৃহীত হয় তাও আমি বুজতেছিনা। তার এই [http://en.wikipedia.org/wiki/Wikipedia:Sock_puppetry SP] সমর্থন বাদ দিলে [[উইকিঅভিধান:প্রশাসক হওয়ার আবেদন]] এর নিয়ম অনুযায়ী তিনি প্রশাসক হতে পারেন না (যদিও তিনি এখন প্রশাসক) <br />এই পরিস্থিতে কি করা উচিত। --[[User:Aftab1995|লিমন]] ([[User talk:Aftab1995|আলাপ]] | [[Special:Contributions/Aftab1995|অবদান]]) ১৪:২২, ১৪ মে ২০১৩ (ইউটিসি)
:দেখুন আপনি কি ভিত্তিতে SP বলছেন তা আমি বুঝছি না। তার নাম সিয়াম আমি এটুকুই বলব যেহেতু সে আমাকে নিষেধ করেছে তার নাম বলতে। আর তার কোন কম্পিউটার নেই। সে কম্পিউটার ব্যবহার করতে গেলে আমার কম্পিউটার ব্যবহার করে। তাই তার আর আমার আইপি অ্যাড্রেস এক হতে পারে। এছাড়া তার পরীক্ষা চলছে। কোথায় আমি তা বলব না। তবে এতুকু নিশ্চিত করতে পারি যে সে সম্পাদনার কাজ বন্ধ করবে না। কিন্তু আপনার আর আমার মত নিয়মিত সম্পাদনা করবে না। আর আমি সে আমাকে সমর্থন দেয়ার আগে বলেছিলাম যে এরকম কিছু একটা হতে পারে। কিন্তু সে শোনেনি এবং একজন ব্যবহারকারী কে বাধা দেয়ার ক্ষমতা আমার নেই। তবে এখন থেকে আমি তাকে অনুরধ করব সমর্থন না দেয়ার ব্যাপারে। দরকার হলে আপনি একটা ওয়ারনিং দিতে পারেন। আর আশা করব আপনি এই উইকি-তে নিয়মিত সম্পাদনা করবেন এবং আমরা এই উইকি-কে en উইকির এর মত করে তুলতে পারব। শেষ প্রশ্ন আমি অ্যাডমিন হওয়ার আগে যখন আবেদন করেছিলাম আপনি তখন মন্তব্য করেন-নি কেন? আর সম্পাদনা সংখ্যা দিয়ে অ্যাডমিন নির্ণয় করা যায় না। আমি এখন যাচ্ছি প্রশ্ন থাকলে করুন কালকে reply করব। --'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ১৪:৫৮, ১৪ মে ২০১৩ (ইউটিসি)
::[http://meta.wikimedia.org/w/index.php?title=Steward_requests/Checkuser&action=edit&section=4 এটির ভিত্তিতে] No check necessary here, it more looks like a "[[:en:WP:Meatpuppet|meatpuppet]]") SP বলেছি। আর দেখুন একাউন্টটি কিন্তু আপনার আবেদন করার দিন তৈরি হয়, এই কারনে আমার সন্দেহ টা বেশী হয়েছে--[[User:Aftab1995|লিমন]] ([[User talk:Aftab1995|আলাপ]] | [[Special:Contributions/Aftab1995|অবদান]]) ১৫:২৩, ১৪ মে ২০১৩ (ইউটিসি)
::: প্রত্যয় ঘোষ [[:m:Steward_requests/Permissions#Pratyya Ghosh@bn-wiktionary|মেটাউইকিতে]] তার আবেদনে লিখেছে,
::Hey I requested adminship on bn.wiktionary and got local support from the local community I don't know how I got that support. But I want this right for the sake of that wiktionary. Because there are no one available. Only admin is not available. That's why I want this right.--Pratyya (Hello!) 12:08, 11 May 2013 (UTC)
:::প্রত্যয় আপনি এবং সিয়াম যে একই ব্যক্তি নয় তা আপনি শুধু দাবি করছে, কিন্তু এই মূহুর্তে কোন উপায়ে আপনি তা প্রমাণ করতে পারবেন না। আপনি আপনার বন্ধুর নাম বা পরিচয় দিলেও তা প্রমাণ হয় না যে ঐ অ্যাকাউন্টটি আপনারই আরেকটি অ্যাকাউন্ট নয়। মেটাউইকিতে আপনার আবেদনে আপনি লিখেছেন "আপনি জানেন না কিভাবে এখানে সমর্থন পেলেন।" এ কথার প্রকৃত মানে কি? এখানে আপনি কোথায় লোকাল কমিউনিটির সমর্থন পেয়েছেন? আপনি কি এখানে মিথ্যাচার করেননি?
:::আর আপনি এখানে অবদান রেখেছেন, আপনার অবশ্যই ধারণা রয়েছে যে এখানে নিয়মিত ব্যবহারকারী নেই বা আপনার আবেদনে মতামত দিতে পারে এমন ব্যবহারকারী এখানে নেই। তো আপনার উচিত ছিল এ বিষয়ে মন্তব্য চেয়ে বাংলা উইকিপিডিয়ায় একটি নোটিফিকেশন দেওয়া। তাই কেন লিমন আগে এখানে মন্তব্য করেনি তা প্রশ্ন করা অমূলক।
:::[[:m:Steward_requests/Permissions#Pratyya Ghosh@bn-wiktionary]] অনুযায়ী প্রত্যয় ঘোষ তিন মাসের জন্য উইকিঅভিধানের অস্থায়ী প্রশাসকত্ব পেয়েছেন। যেহেতু অন্যকোনো সক্রিয় প্রশাসক নেই এবং উইকিঅভিধানের উন্নয়নে সে এই প্রশাসকত্ব নিয়েছেন বলে দাবি করেছে। আমার যতটুকু ধারণা প্রত্যয়ের আবেদন গৃহীত হয়েছে শুধু মাত্র এ কারণে যে অধিকারটি অস্থায়ীভাবে দেওয়া হয়েছে এবং এখানে অন্য কোন সক্রিয় প্রশাসক নেই। আমি দেখতে আগ্রহী যে প্রতিশ্রুতি দিয়ে প্রত্যয় অস্থায়ী প্রশাসত্ব পেয়েছে তা কতটুকু রক্ষা করতে পারেন। প্রশাসকত্ব লাভ কোন পদ লাভ নয়, এর অর্থ আপনি আপনার সাধারণ অবদানের পাশাপাশি কিছু বাড়তি দায়িত্ব গ্রহণ করা। আমি দেখতে আগ্রহী প্রত্যয় তার কথা অনুযায়ী কতটুকু দায়িত্ব পালন করে এবং উইকিঅভিধানের উন্নয়নে সে কি অবদান রাখতে পারে।--[[User:Bellayet|Bellayet]] ([[User talk:Bellayet|আলাপ]]) ১৬:০৩, ১৪ মে ২০১৩ (ইউটিসি)
::::*কিন্তু আমি কি করব লিমন ভাই। আমি আপনাকে বলেছি আপনি তাকে ওয়ার্নিং দিন। আমি মৌখিক ভাভে কাল্কেই এ ব্যাপারে তাকে সতর্ক করেছি এবং সে আশ্বাস দিয়েছে ভবিষ্যতে এরকম করবে না।
*২য়ত বেলায়েত ভাই আমি প্রমান দিতে পারব না। আপনাকে আমার কথার উপর বিশ্বাস রাখতে হবে। আর এতদিনে আমি কোন খারাপ কাজ উইকি-তে করি নি। আমার নিজস্ব ২টি অ্যাকাউন্ট আছে, সেতা আমি স্বীকার করে নিয়েছি। আর এই প্রথম আমি এত বড় কোন আবেদন করেছি। এর পর আবেদন করলে আমি বাংলা উইকিপিডিয়া-র প্রশাসক-দের আলোচনা সভায় নিয়ে যাব। আর এই ৩ মাস আপনি আমার কাজ দেখুন। আমার বিশ্বাস আমি ৩ মাস পর যখন আবার আবেদন করব আপনি নিজেও আমার কাজে মুগ্ধ হয়ে ভোট দেবেন। খালি ৩ মাস আমার কাজ দেখুন এবং আমার কাজে সাহায্য করুন। --'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ০৫:৩১, ১৫ মে ২০১৩ (ইউটিসি)
::::প্রত্যয় আপনি সঠিকভাবে আমার প্রশ্নের উত্তর দেননি। আপনি আপনার আবেদনে যে উদ্দেশ্যমূলকভাবে ভুল বা মিথ্যা তথ্য দিয়েছেন তা কোনো ভাল কাজের মধ্যে পরে না। আপনি এখনও লিমনকে বলছেন অদৃশ্য কাউকে ওয়ার্নিং দিতে আবার আপনিও তাকে সতর্ক করে দিয়েছেন। কিন্তু এই নাটক কেন? তাহলে কোথায় আপনি স্বীকার করেছেন যে আপনার ২টি অ্যাকাউন্ট রয়েছে?
:::::'''Ghosh10'''। এতি আমার ২ নং অ্যাকাউন্ট। এতি বাংলা উইকিপিডিয়া-তে কার্যকর নেই। সম্ভবত en wiki and simple wiki. আর আপনি যদি তা মিথ্যাচার বলেন তাহলে তাই বলুন। কিন্তু কোন অদৃশ্য কেউ নেই। সে আছে। কিন্তু আমি বলেইছি এ ব্যপারে এমন কিছু বলব না যাতে তার পরিচয় প্রকাশ পায়। তাই আপনি যা চিন্তা করবেন তাই ঠিক। আর এ ব্যপারে আমি আর কোন কমেন্ট চাচ্ছি না। এখানেই ব্যপারতা শেষ হওয়া ভাল। আমি আপনাকে সব কিছু বলে দিয়েছি, আর এর বেশি কিছু বলা সম্ভব নয়। আর ওকে নিয়ে এত জল ঘোলা হলে ওকে আমি বলে দিব নতুন কোন অ্যাকাউন্ট খুলে বাংলা উইকিপিডিয়া থেকে ১০০ মাইল দূরে থাকতে। এ ছাড়া আমি আর কিছু বলতে পারছি না। ধন্যবাদ। আর আমি নিয়মিত মেটা স্টুয়ার্ড-দের সঙ্গে কথা বলি। যদি তারা মনে করতেন এ ব্যপারতা ঠিক নয় তারা আমাকে এ অধিকার দিতেন না।--'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ০৩:১২, ১৬ মে ২০১৩ (ইউটিসি)
:::আপনাকে অধিকার দেওয়া না দেওয়ার প্রশ্ন এখানে নয়। তৃতীয় কাউকে নিয়ে এখানে জল ঘোলাও হয়নি। আমি আগেই বলেছি কারও সমর্থনে আপনি এখানে অধিকার পাননি। কিন্তু এই অধিকার পেতে আপনি যে পন্থা (মেটাপাতায় মিথ্যাচার) অবলম্বন করেছেন তা ন্যায়সঙ্গত নয়। আপনি স্বীকার করুন আর নাই বা করুন তাতে কিছু এসে যায় না। সময়েই বলে সব বলে দিবে।--[[User:Bellayet|Bellayet]] ([[User talk:Bellayet|আলাপ]]) ১৩:৩৩, ১৬ মে ২০১৩ (ইউটিসি)
::::কি বলবো ঠিক বুঝে উঠতে পারছিনা! এক্ষেত্রে সিদ্ধান্ত নেয়ার ভার রাগিব হাসান / তানভীর / বেলায়েত সাহেবদের উপর ছেড়ে দেয়াই ভালো।
::::তবে, পর্যবেক্ষণ করে দেখা যেতে পারে যে তিনি কতটুকু অবদান রাখেন। - - - [[User:Ashiq Shawon|Ashiq Shawon]] ([[User talk:Ashiq Shawon|আলাপ]]) ১৩:৩৭, ১৬ মে ২০১৩ (ইউটিসি)
<!--== Tech newsletter: Subscribe to receive the next editions ==
<div style="width:auto; padding: 1em; background:#fdf6e3;" class="plainlinks" ><big>Latest '''[[m:Tech/News|<span style="color:#268bd2;">Tech news</span>]]''' from the Wikimedia technical community.</big> ''Please inform other users about these changes.''</div>
<div style="width:auto; padding: 1em; border: 2px solid #fdf6e3;" class="plainlinks" >
;Recent software changes: ''(Not all changes will affect you.)''
* The latest version of MediaWiki (version [[mw:MediaWiki 1.22/wmf4|1.22/wmf4]]) was added to non-Wikipedia wikis on May 13, and to the English Wikipedia (with a Wikidata software update) on May 20. It will be updated on all other Wikipedia sites on May 22. [https://gerrit.wikimedia.org/r/gitweb?p=operations/mediawiki-config.git;a=commitdiff;h=ed976cf0c14fa3632fd10d9300bb646bfd6fe751;hp=c6c7bb1e5caaddf7325de9eef0e7bf85bcf5cc35] [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069458.html]
* A software update will perhaps result in temporary issues with images. Please [[m:Tech/Ambassadors|report any problems]] you notice. [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069458.html]
* MediaWiki recognizes links in twelve new [[:w:en:URI scheme|schemes]]. Users can now link to [[:w:en:SSH|SSH]], [[:w:en:XMPP|XMPP]] and [[:w:en:Bitcoin|Bitcoin]] directly from wikicode. [https://gerrit.wikimedia.org/r/gitweb?p=mediawiki/core.git;a=commitdiff;h=a89d623302b5027dbb2d06941a22372948757685]
* VisualEditor was added to [[bugzilla:48430|all content namespaces]] on mediawiki.org on May 20. [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069458.html]
* A new extension ("TemplateData") was added to all Wikipedia sites on May 20. It will allow a future version of VisualEditor to [[bugzilla:44444|edit templates]]. [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069458.html]
* New sites: [[:voy:el:|Greek Wikivoyage]] and [[:wikt:vec:|Venetian Wiktionary]] joined the Wikimedia family last week; the total number of project wikis is now 794. [https://gerrit.wikimedia.org/r/gitweb?p=operations/mediawiki-config.git;a=commit;h=5d7536b403730bb502580e21243f923c3b79da0e] [https://gerrit.wikimedia.org/r/gitweb?p=operations/mediawiki-config.git;a=commit;h=43c9eebdfc976333be5c890439ba1fae3bef46f7]
* The logo of 18 Wikipedias was changed to [[w:en:Wikipedia:Wikipedia_logos#The_May_2010_logo|version 2.0]] in a [http://lists.wikimedia.org/pipermail/wikimedia-l/2013-May/125999.html third group of updates]. [https://gerrit.wikimedia.org/r/gitweb?p=operations/mediawiki-config.git;a=commitdiff;h=4688adbe467440eea318eecf04839fdd9ffa0565]
* The [[:commons:Special:UploadWizard|UploadWizard]] on Commons now shows links to the old upload form in 55 languages ([[:bugzilla:33513|bug 33513]]). [https://gerrit.wikimedia.org/r/gitweb?p=operations/mediawiki-config.git;a=commit;h=4197fa18a22660296d0e5b84820d5ebb4cef46d4]
;Future software changes:
* The next version of MediaWiki (version 1.22/wmf5) will be added to Wikimedia sites starting on May 27. [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069458.html]
* An updated version of [[mw:Echo (Notifications)|Notifications]], with new features and fewer bugs, will be added to the English Wikipedia on May 23. [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069458.html]
* The [[m:Special:MyLanguage/Single User Login finalisation announcement|final version]] of the "single user login" (which allows people to use the same username on different Wikimedia wikis) is moved to August 2013. The software will [http://lists.wikimedia.org/pipermail/wikitech-ambassadors/2013-April/000217.html automatically rename] some usernames. [http://lists.wikimedia.org/pipermail/wikitech-ambassadors/2013-May/000233.html]
* A [[m:Special:MyLanguage/Flow|new discussion system]] for MediaWiki, called "Flow", is under development. Wikimedia designers need your help to inform other users, [http://unicorn.wmflabs.org/flow/ test the prototype] and discuss the interface. [http://lists.wikimedia.org/pipermail/wikitech-l/2013-May/069433.html].
* The Wikimedia Foundation is hiring people to act as links between software developers and users for VisualEditor. [http://lists.wikimedia.org/pipermail/wikitech-ambassadors/2013-May/000245.html]
</div>
<div style="font-size:90%; font-style:italic; background:#fdf6e3; padding:1em;">'''[[m:Tech/News|Tech news]]''' prepared by [[m:Tech/Ambassadors|tech ambassadors]] and posted by [[m:Global message delivery|Global message delivery]] • [[m:Tech/News#contribute|Contribute]] • [[m:Tech/News/2013/21|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Unsubscribe]] • ২০:১৬, ২০ মে ২০১৩ (ইউটিসি)
</div>
<div style="float:left; background:#eee8d5; border: .2em solid #dc322f; border-left: .7em solid #dc322f; padding: 1em; "><span style="color:#dc322f;font-weight:bold;">Important note:</span> This is the first edition of the [[m:Tech/News|Tech News]] weekly summaries, which help you monitor recent software changes likely to impact you and your fellow Wikimedians.
'''If you want to continue to receive the next issues every week''', please '''[[m:Global message delivery/Targets/Tech ambassadors|subscribe to the newsletter]]'''. You can subscribe your personal talk page and a community page like this one. The newsletter can be [[m:Tech/News/2013/21|translated into your language]].
You can also [[m:Tech/Ambassadors|become a tech ambassador]], [[m:Tech/News|help us write the next newsletter]] and [[m:Talk:Tech/News|tell us what to improve]]. Your feedback is greatly appreciated. [[m:user:guillom|guillom]] ২০:১৬, ২০ মে ২০১৩ (ইউটিসি)</div>
<!-- EdwardsBot 0455 -->-->
== Trademark discussion ==
Hi, apologies for posting this in English, but I wanted to alert your community to a discussion on Meta about potential changes to the Wikimedia Trademark Policy. Please translate this statement if you can. We hope that you will all participate in the discussion; we also welcome translations of the legal team’s statement into as many languages as possible and encourage you to voice your thoughts there. Please see the [[:m:Trademark practices discussion|Trademark practices discussion (on Meta-Wiki)]] for more information. Thank you! --[[:m:User:Mdennis_(WMF)|Mdennis (WMF)]] ([[:m:User talk:Mdennis_(WMF)|talk]])
<!-- EdwardsBot 0473 -->
== Universal Language Selector to replace Narayam and WebFonts extensions ==
On June 11, 2013, the [[mw:Universal Language Selector|Universal Language Selector]] (ULS) will replace the features of Mediawiki extensions Narayam and WebFonts. The ULS provides a flexible way of configuring and delivering language settings like interface language, fonts, and input methods (keyboard mappings).
Please read the [[m:Announcement Universal Language Selector|announcement on Meta-Wiki]] for more information. [[m:User talk:Runab WMF|Runab]] ১৪:০৬, ৫ জুন ২০১৩ (ইউটিসি) ''(posted via [[m:Global message delivery|Global message delivery]])''
<!-- EdwardsBot 0474 -->
== [[:m:Requests_for_comment/X!'s_Edit_Counter|X!'s Edit Counter]] ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
<small>(Sorry for writing in English. You can [[:m:Special:MyLanguage/Requests_for_comment/X!%27s_Edit_Counter/Summary|translate the proposal]].)</small>
Should [[tools:~tparis/pcount|X!'s edit counter]] retain the opt-in requirement? Your input is strongly encouraged. [[:m:Requests_for_comment/X!'s_Edit_Counter|Voice your input here]].—[[:m:w:User:Cyberpower678|<span style="color:green;font-family:Neuropol">cyberpower]] [[:m:w:User talk:Cyberpower678|<sup style="color:purple;font-family:arnprior">Chat]]<sub style="margin-left:-4.4ex;color:purple;font-family:arnprior">Automation</sub> ০৪:১০, ২৩ জুন ২০১৩ (ইউটিসি)
:<small>Distributed via [[:m:Global message delivery|Global message delivery]]. (Wrong page? [[:m:Distribution list/Global message delivery|Fix here]].)</small>
</div>
<!-- EdwardsBot 0505 -->
== Pywikipedia is migrating to git ==
Hello, Sorry for English but It's very important for bot operators so I hope someone translates this.
[[mw:PWB|Pywikipedia]] is migrating to Git so after July 26, SVN checkouts won't be updated If you're using Pywikipedia you have to switch to git, otherwise you will use out-dated framework and your bot might not work properly. There is a [[mw:Manual:Pywikipediabot/Gerrit|manual]] for doing that and a [https://blog.wikimedia.org/2013/07/23/pywikipediabot-moving-to-git-on-july-26/ blog post] explaining about this change in non-technical language. If you have question feel free to ask in [[mw:Manual talk:Pywikipediabot/Gerrit]], [https://lists.wikimedia.org/mailman/listinfo/pywikipedia-l mailing list], or in the [irc://irc.freenode.net/#pywikipediabot IRC channel]. Best [[mw:User:Ladsgroup|Amir]] <small>(via [[m:Global message delivery|Global message delivery]]).</small> ১২:৫৫, ২৩ জুলাই ২০১৩ (ইউটিসি)
<!-- EdwardsBot 0534 -->
== HTTPS for users with an account ==
Greetings. Starting on August 21 (tomorrow), all users with an account will be using [[m:w:en:HTTPS|HTTPS]] to access Wikimedia sites. HTTPS brings better security and improves your privacy. More information is available at [[m:HTTPS]].
If HTTPS causes problems for you, tell us [https://bugzilla.wikimedia.org on bugzilla], [[m:IRC|on IRC]] (in the <code>#wikimedia-operations</code> channel) or [[m:Talk:HTTPS|on meta]]. If you can't use the other methods, you can also send an e-mail to <code>https@wikimedia.org</code>.
[[m:User:Greg (WMF)|Greg Grossmeier]] <small>(via the [[m:Global message delivery|Global message delivery]] system)</small>. ১৮:৪৬, ২০ আগস্ট ২০১৩ (ইউটিসি) <small>(wrong page? [[m:Distribution list/Global message delivery|You can fix it.]])</small>
<!-- EdwardsBot 0560 -->
== [[:m:Community Logo/Request for consultation|Request for consultation on community logo]] ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
[[File:Wikimedia Community Logo.svg|thumb|Request for consultation on this community logo]]
First, I’d like to apologize for the English. If you can, please help to translate this for other members of your community.
The legal team at the Wikimedia Foundation would greatly appreciate your input on the best way to manage the "community logo" (pictured here) to best balance protection of the projects with community support. Accordingly, they have created a “request for consultation” on Meta where they set out briefly some of the issues to be considered and the options that they perceive. [[:m:Community Logo/Request for consultation|Your input would be invaluable]] in helping guide them in how best to serve our mission.
Thank you! --[[m:User:Mdennis|Mdennis]] ([[m:User talk:Mdennis|talk]]) <small>(via the [[m:Global message delivery|Global message delivery]] system)</small>. ০২:০২, ২৪ সেপ্টেম্বর ২০১৩ (ইউটিসি) <small>(wrong page? [[m:Distribution list/Global message delivery|You can fix it.]])</small>
</div>
<!-- EdwardsBot 0590 -->
== [[mw:Echo|Notifications]] ==
[[File:Notifications-Flyout-Screenshot-08-10-2013-Cropped.png|thumb|300px|Notifications inform you of new activity that affects you -- and let you take quick action.]]
''(This message is in English, please translate as needed)''
Greetings!
[[mw:Echo|Notifications]] will inform users about new activity that affects them on this wiki in a unified way: for example, this new tool will let you know when you have new talk page messages, edit reverts, mentions or links -- and is designed to augment (rather than replace) the watchlist. The Wikimedia Foundation's editor engagement team developed this tool (code-named 'Echo') earlier this year, to help users contribute more productively to MediaWiki projects.
We're now getting ready to bring Notifications to almost all other Wikimedia sites, and are aiming for a 22 October deployment, as outlined in [[mw:Echo/Release_Plan_2013|this release plan]]. It is important that notifications is translated for all of the languages we serve.
There are three major points of translation needed to be either done or checked:
*[https://translatewiki.net/w/i.php?title=Special%3AMessageGroupStats&x=D&group=ext-echo#sortable:3=desc Echo on translatewiki for user interface] - you must have an account on translatewiki to translate
*[https://translatewiki.net/w/i.php?title=Special%3AMessageGroupStats&x=D&group=ext-thanks#sortable:3=desc Thanks on translatewiki for user interface] - you must have an account on translatewiki to translate
*[[mw:Help:Notifications|Notifications help on mediawiki.org]]. This page can be hosted after translation on mediawiki.org or we can localize it to this Wikipedia. You do not have to have an account to translate on mediawiki, but single-user login will create it for you there if you follow the link.
:*[[mw:Echo/Release Plan 2013#Checklist|Checklist]]
Please let us know if you have any questions, suggestions or comments about this new tool. For more information, visit [[mw:Echo_(Notifications)|this project hub]] and [[mw:Help:Notifications|this help page]]. [[m:User:Keegan (WMF)|Keegan (WMF)]] ([[m:User talk:Keegan (WMF)|talk]]) ১৮:১১, ৪ অক্টোবর ২০১৩ (ইউটিসি)
:<small>(via the [[m:Global message delivery|Global message delivery]] system) (wrong page? [[m:Distribution list/Global message delivery|You can fix it.]])</small>
<!-- EdwardsBot 0597 -->
== Speak up about the trademark registration of the Community logo. ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hi all,
Please join the consultation about the Community logo that represents Meta-Wiki: [[:m:Community Logo/Request for consultation]].
This community consultation was commenced on September 24. The following day, two individuals filed a legal opposition against the registration of the Community logo.
The question is whether the Wikimedia Foundation should seek a collective membership mark with respect to this logo or abandon its registration and protection of the trademark.
We want to make sure that everyone gets a chance to speak up so that we can get clear direction from the community. We would therefore really appreciate the community's help in translating this announcement from English so that everyone is able to understand it.
Thanks,
[[m:User:Geoffbrigham|Geoff]] & [[m:User:YWelinder (WMF)|Yana]] ১৯:৪২, ৮ অক্টোবর ২০১৩ (ইউটিসি)
</div>
<!-- EdwardsBot 0601 -->
== Introducing Beta Features ==
<div lang="en" dir="ltr" class="mw-content-ltr">
''(Apologies for writing in English. Please translate if necessary)''
We would like to let you know about [[mw:About_Beta_Features|Beta Features]], a new program from the Wikimedia Foundation that lets you try out new features before they are released for everyone.
Think of it as a digital laboratory where community members can preview upcoming software and give feedback to help improve them. This special preference page lets designers and engineers experiment with new features on a broad scale, but in a way that's not disruptive.
Beta Features is now ready for testing on [[mw:Special:Preferences#mw-prefsection-betafeatures|MediaWiki.org]]. It will also be released on Wikimedia Commons and MetaWiki this Thursday, 7 November. Based on test results, the plan is to release it on all wikis worldwide on 21 November, 2013.
Here are the first features you can test this week:
* [[mw:Multimedia/About_Media_Viewer|Media Viewer]] — view images in large size or full screen
* [[mw:VisualEditor/Beta_Features/Formulae|VisualEditor Formulæ]] (for wikis with [[mw:VisualEditor|VisualEditor]]) — edit algebra or equations on your pages
* [[mw:Typography_Update|Typography Refresh]] — make text more readable (coming Thursday)
Would you like to try out Beta Features now? After you log in on MediaWiki.org, a small 'Beta' link will appear next to your 'Preferences'. Click on it to see features you can test, check the ones you want, then click 'Save'. Learn more on the [[mw:About_Beta_Features|Beta Features page]].
After you've tested Beta Features, please let the developers know what you think on [[mw:Talk:About_Beta_Features|this discussion page]] -- or report any bugs [http://wmbug.com/new?product=MediaWiki%20extensions&component=BetaFeatures here on Bugzilla]. You're also welcome to join [[m:IRC_office_hours#Upcoming_office_hours|this IRC office hours chat]] on Friday, 8 November at 18:30 UTC.
Beta Features was developed by the Wikimedia Foundation's Design, Multimedia and VisualEditor teams. Along with other developers, they will be adding new features to this experimental program every few weeks. They are very grateful to all the community members who helped create this project — and look forward to many more productive collaborations in the future.
Enjoy, and don't forget to let developers know what you think! [[m:User:Keegan (WMF)|Keegan (WMF)]] ([[m:User talk:Keegan (WMF)|talk]]) ১৯:৩২, ৫ নভেম্বর ২০১৩ (ইউটিসি)
:<small>Distributed via [[m:Global message delivery|Global message delivery]] (wrong page? [[m:Distribution list/Global message delivery|Correct it here]])</small>, ১৯:৩২, ৫ নভেম্বর ২০১৩ (ইউটিসি)
</div>
<!-- EdwardsBot 0622 -->
== Call for comments on draft trademark policy ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hi all,
The Wikimedia legal team invites you to participate in the development of the new Wikimedia trademark policy.
The [[:wmf:Trademark policy|current trademark policy]] was introduced in 2009 to protect the [[:wmf:Wikimedia trademarks|Wikimedia marks]]. We are now updating this policy to better balance permissive use of the marks with the legal requirements for preserving them for the community. The new draft trademark policy is ready for your review [[:m:Trademark policy|here]], and we encourage you to discuss it [[:m:Talk:Trademark policy|here]].
We would appreciate if someone would translate this message into your language so more members of your community can contribute to the conversation.
Thanks, <br />
[[:m:User:YWelinder (WMF)|Yana]] & [[:m:User:Geoffbrigham|Geoff]]
</div>
<!-- EdwardsBot 0657 -->
== ইম্পোর্টার ==
বর্তমানে আমাদের এই উইকি-তে ইম্পোর্ট করার সুবিধাটি নেই। এটি নিষ্ক্রিয় করা আছে। কিন্তু এই ইম্পোর্ট-এর মাধ্যমে বিভিন্ন ধরণের টেম্পলেট, গেজেট আমদানি করা যায় খুব সহজেই। তাই আমি এই উইকি-তে ইম্পোর্ট সক্রিয় করার প্রস্তাব রাখছি।--'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ১১:৫১, ২২ নভেম্বর ২০১৩ (ইউটিসি)
:{{agree}} প্রস্তাবকারী। --'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ১১:৫৮, ২২ নভেম্বর ২০১৩ (ইউটিসি)
:{{agree}} ইম্পোর্টের সুবিধা থাকলে খারাপ হয় না। তবে ছোট উইকি হিসেবে কপি-পেস্ট করেও চলে। — [[ব্যবহারকারী:Wikitanvir|তানভির]] • [[ব্যবহারকারী আলাপ:Wikitanvir|আলাপ]] • ১২:৫০, ২২ নভেম্বর ২০১৩ (ইউটিসি)
::{{disagree}} The wiki does not have any active community and the edit count is very low. There is no initiative for improving the content or recruiting new editors for that site. So I don't think this is the right time to enable the import operation for this small Wiki. My suggestion is, do improvement at the content side, attract people to do work with you on content, build an active community; when the site becomes busy, with lots of traffic and edits and there is demand for lots of templates and gadgets, then apply to enable the import for this wiki.--[[User:Bellayet|Bellayet]] ([[User talk:Bellayet|আলাপ]]) ১২:৫৫, ২২ নভেম্বর ২০১৩ (ইউটিসি)
== Request for comment on Commons: Should Wikimedia support MP4 video? ==
''I apologize for this message being only in English. Please translate it if needed to help your community.''
The Wikimedia Foundation's [[mw:Multimedia|multimedia team]] seeks community guidance on a proposal to support the [[w:MP4|MP4 video format]]. This digital video standard is used widely around the world to record, edit and watch videos on mobile phones, desktop computers and home video devices. It is also known as [[w:MP4|H.264/MPEG-4 or AVC]].
Supporting the MP4 format would make it much easier for our users to view and contribute video on Wikipedia and Wikimedia projects -- and video files could be offered in dual formats on our sites, so we could continue to support current open formats (WebM and Ogg Theora).
However, MP4 is a patent-encumbered format, and using a proprietary format would be a departure from our current practice of only supporting open formats on our sites -- even though the licenses appear to have acceptable legal terms, with only a small fee required.
We would appreciate your guidance on whether or not to support MP4. Our Request for Comments presents views both in favor and against MP4 support, based on opinions we’ve heard in our discussions with community and team members.
[[commons:Commons:Requests for comment/MP4 Video|Please join this RfC -- and share your advice]].
All users are welcome to participate, whether you are active on Commons, Wikipedia, other Wikimedia project -- or any site that uses content from our free media repository.
You are also welcome to join tomorrow's [[m:IRC_office_hours#Upcoming_office_hours|Office hours chat on IRC]], this Thursday, January 16, at 19:00 UTC, if you would like to discuss this project with our team and other community members.
We look forward to a constructive discussion with you, so we can make a more informed decision together on this important topic. [[m:User:Keegan (WMF)|Keegan (WMF)]] ([[m:User talk:Keegan (WMF)|talk]]) ০৬:৪৭, ১৬ জানুয়ারি ২০১৪ (ইউটিসি)
<!-- Message sent by User:Keegan (WMF)@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=User:Keegan_(WMF)/MP4_notice_targets&oldid=7105580 -->
== Universal Language Selector will be enabled by default again on this wiki by 21 February 2014 ==
<div class="mw-content-ltr" lang="en" dir="ltr">
On January 21 2014 the MediaWiki extension [[mw:Universal Language Selector|Universal Language Selector]] (ULS) was [[mw:Universal Language Selector/Announcement Jan2014|disabled]] on this wiki. A new preference was added for logged-in users to turn on ULS. This was done to prevent slow loading of pages due to ULS webfonts, a behaviour that had been observed by the Wikimedia Technical Operations team on some wikis.
We are now ready to enable ULS again. The temporary preference to enable ULS will be removed. A [[commons:File:ULS-font-checkbox.png|new checkbox]] has been added to the Language Panel to enable/disable font delivery. This will be unchecked by default for this wiki, but can be selected at any time by the users to enable webfonts. This is an interim solution while we improve the feature of webfonts delivery.
You can read the [[mw:Universal Language Selector/Announcement Feb2014|announcement]] and the [[mw:Universal Language Selector/Upcoming Development Plan|development plan]] for more information. Apologies for writing this message only in English. Thank you. [[m:User_talk:Runab WMF|Runa]]
</div>
<!-- Message sent by User:Runab WMF@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/ULS_Reenable_2014&oldid=7490703 -->
== Amendment to the Terms of Use ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hello all,
Please join a discussion about a [[:m:Terms of use/Paid contributions amendment|proposed amendment]] to the [[wmf:Terms of Use|Wikimedia Terms of Use]] regarding undisclosed paid editing and we encourage you to voice your thoughts there. Please translate this statement if you can, and we welcome you to translate the proposed amendment and introduction. Please see [[:m:Terms of use/Paid contributions amendment|the discussion on Meta Wiki]] for more information. Thank you! [[:m:User:Slaporte (WMF)|Slaporte (WMF)]] ২২:০০, ২১ ফেব্রুয়ারি ২০১৪ (ইউটিসি)
</div>
<!-- Message sent by User:Jalexander@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=7499312 -->
== Call for project ideas: funding is available for community experiments ==
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:IEG_key_blue.png|100px|right]]
''I apologize if this message is not in your language. Please help translate it.''
Do you have an idea for a project that could improve your community? [[m:Grants:IEG|Individual Engagement Grants]] from the Wikimedia Foundation help support individuals and small teams to organize experiments for 6 months. You can get funding to try out your idea for online community organizing, outreach, tool-building, or research to help make {{SITENAME}} better. In March, we’re looking for new project proposals.
Examples of past Individual Engagement Grant projects:
*[[m:Grants:IEG/Build_an_effective_method_of_publicity_in_PRChina|Organizing social media for Chinese Wikipedia]] ($350 for materials)
*[[m:Grants:IEG/Visual_editor-_gadgets_compatibility|Improving gadgets for Visual Editor]] ($4500 for developers)
*[[m:Grants:IEG/The_Wikipedia_Library|Coordinating access to reliable sources for Wikipedians]] ($7500 for project management, consultants and materials)
*[[m:Grants:IEG/Elaborate_Wikisource_strategic_vision|Building community and strategy for Wikisource]] (€10000 for organizing and travel)
'''[[m:Grants:IEG#ieg-applying|Proposals]] are due by 31 March 2014.''' There are a number of ways to [[m:Grants:IEG|get involved]]!
Hope to have your participation,
--[[m:User:Sbouterse (WMF)|Siko Bouterse, Head of Individual
Engagement Grants, Wikimedia Foundation]] ১৯:৪৪, ২৮ ফেব্রুয়ারি ২০১৪ (ইউটিসি)
</div>
<!-- Message sent by User:AKoval (WMF)@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=IEG/MassMessageList&oldid=7675744 -->
== Proposed optional changes to Terms of Use amendment ==
<div lang="en" dir="ltr" class="mw-content-ltr">Hello all, in response to some community comments in the discussion on the amendment to the Terms of Use on undisclosed paid editing, we have prepared two optional changes. Please [[m:Terms_of_use/Paid_contributions_amendment#Optional_changes|read about these optional changes on Meta wiki]] and share your comments. If you can (and this is a non english project), please translate this announcement. Thanks! [[m:User:Slaporte (WMF)|Slaporte (WMF)]] ২১:৫৬, ১৩ মার্চ ২০১৪ (ইউটিসি) </div>
<!-- Message sent by User:Jalexander@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=7592057 -->
== Changes to the default site typography coming soon ==
<div lang="en" dir="ltr" class="mw-content-ltr">
This week, the typography on Wikimedia sites will be updated for all readers and editors who use the default "Vector" skin. This change will involve new serif fonts for some headings, small tweaks to body content fonts, text size, text color, and spacing between elements. The schedule is:
* '''April 1st''': non-Wikipedia projects will see this change live
* '''April 3rd''': Wikipedias will see this change live
This change is very similar to the "Typography Update" Beta Feature that has been available on Wikimedia projects since November 2013. After several rounds of testing and with feedback from the community, this Beta Feature will be disabled and successful aspects enabled in the default site appearance. Users who are logged in may still choose to use another skin, or alter their [[Special:MyPage/vector.css|personal CSS]], if they prefer a different appearance. Local [[MediaWiki:Common.css|common CSS]] styles will also apply as normal, for issues with local styles and scripts that impact all users.
For more information:
* [[mw:Typography refresh|Summary of changes and FAQ]]
* [[mw:Talk:Typography refresh|Discussion page]] for feedback or questions
* [https://blog.wikimedia.org/2014/03/27/typography-refresh/ Post] on blog.wikimedia.org
-- [[m:User:Steven (WMF)|Steven Walling]] (Product Manager) on behalf of the Wikimedia Foundation's [[mw:Design|User Experience Design]] team
</div>
<!-- Message sent by User:Steven (WMF)@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=7990801 -->
== Pronunciation Recording ==
[[File:Visual workflow draft for pronunciation recording gadget for Wiktionary users final en.webm|thumb|upright=2|Visual workflow draft for pronunciation recording gadget; If you have trouble watching this video here, watch it on [https://vimeo.com/89049570 vimeo]. A more extensive/explanative version [[:File:Visual workflow draft for pronunciation recording gadget for non-Wiktionary users final en.webm|is available]].]]
Dear Wiktionary community!
;About me: My name is Rainer Rillke, and I have been volunteering at [[:commons:User:Rillke|Wikimedia Commons]] for 3 years now, gathering experience around media files. I've been always interested in how things work and how one could improve them.
;The idea: One idea that appeared last Summer was allowing the recording of small chunks of speech, uploading that to Wikimedia Commons in the background and including this into a Wiktionary entry without having the hassle doing everything by hand or installing additional software. That idea led to the foundation of MediaWiki extension ''PronunciationRecording'' during the Google Summer of Code. However, this was not completed; instead development is stale for over 5 months now.
;My proposal: To make this going to work, so Wiktionary has an immediate benefit of this feature, I would like to provide the work done so far as a gadget and add some more work in regard to usability. You can see my plan at [[:m:Grants:IEG/Finish Pronunciation Recording]]. And more importantly, you can give me a hand, if you are interested [[:m:Grants:IEG/Finish Pronunciation Recording#Discussion|by writing your comments]].
Thanks and kind regards --[[User:Rillke|Rillke]] ([[User talk:Rillke|আলাপ]]) ১৭:২৪, ৭ এপ্রিল ২০১৪ (ইউটিসি)
<small>This message was delivered based on [[:commons:User:Rillke/gmd/prg]]. Translation fetched from: [[:commons:User:Rillke/prg/en]] -- ''[[User:Rillke|Rillke]]''<sup>[[User talk:Rillke|(q?)]]</sup> 17:42, 26 January 2013 (UTC)</small>
== Using only [[commons:Special:MyLanguage/Commons:Upload Wizard|UploadWizard]] for uploads ==
[[Image:Commons-logo.svg|right|220px|alt=Wikimedia Commons logo]]
<div lang="en" dir="ltr" class="mw-content-ltr">
Hello! It was noted that on this wiki you have [[Special:Statistics|less than 10 local files]]. Presumably, you therefore don't have interest nor energies to have [[commons:Category:Licensing templates|hundreds templates]] with the [[mw:Multimedia/Media Viewer/Template compatibility|now required HTML]], even less a local [[m:EDP|EDP]]. However, this means that users here will experience a mostly broken and/or [[wmf:Resolution:Licensing policy|illegal]] uploading.
I propose to
* '''have [[Special:Upload|local upload]] [[commons:Commons:Turning off local uploads|restricted]]''' to the "{{int:group-sysop}}" group (for emergency uploads) and
* the '''sidebar point to [[commons:Special:UploadWizard]]''',
so that you can avoid local maintenance and all users can have a functioning, easy upload interface [[translatewiki:Special:Translate/ext-uploadwizard|in their own language]]. All registered users can upload on Commons and [[Special:ListFiles|existing files]] will not be affected.
I'll get this done in one week from now.
# If you disagree with the proposal, just [[m:User:Nemo bis/Unused local uploads|remove your wiki from the list]].
# To make the UploadWizard even better, please tell your experience and ideas on [[commons:Commons:Upload Wizard feedback]].
[[m:User:Nemo_bis|Nemo]] ২০:৩৮, ১৯ মে ২০১৪ (ইউটিসি)
</div>
<!-- Message sent by User:Nemo bis@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=User_talk:Nemo_bis/Unused_local_uploads&oldid=8578536 -->
== Using only [[commons:Special:MyLanguage/Commons:Upload Wizard|UploadWizard]] for uploads ==
[[Image:Commons-logo.svg|right|220px|alt=Wikimedia Commons logo]]
<div lang="en" dir="ltr" class="mw-content-ltr">
Hello! It was noted that on this wiki you have [[Special:Statistics|less than 10 local files]]. Presumably, you therefore don't have interest nor energies to have [[commons:Category:Licensing templates|hundreds templates]] with the [[mw:Multimedia/Media Viewer/Template compatibility|now required HTML]], even less a local [[m:EDP|EDP]]. However, this means that users here will experience a mostly broken and/or [[wmf:Resolution:Licensing policy|illegal]] uploading.
I propose to
* '''have [[Special:Upload|local upload]] [[commons:Commons:Turning off local uploads|restricted]]''' to the "{{int:group-sysop}}" group (for emergency uploads) and
* the '''sidebar point to [[commons:Special:UploadWizard]]''',
so that you can avoid local maintenance and all users can have a functioning, easy upload interface [[translatewiki:Special:Translate/ext-uploadwizard|in their own language]]. All registered users can upload on Commons and [[Special:ListFiles|existing files]] will not be affected.
I'll get this done in one week from now.
# If you disagree with the proposal, just [[m:User:Nemo bis/Unused local uploads|remove your wiki from the list]].
# To make the UploadWizard even better, please tell your experience and ideas on [[commons:Commons:Upload Wizard feedback]].
[[m:User:Nemo_bis|Nemo]] ২০:৩৮, ১৯ মে ২০১৪ (ইউটিসি)
</div>
<!-- Message sent by User:Nemo bis@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=User_talk:Nemo_bis/Unused_local_uploads&oldid=8578536 -->
== Media Viewer ==
<br>
<div lang="en" dir="ltr" class="mw-content-ltr">
Greetings, my apologies for writing in English.
I wanted to let you know that [[mw:Multimedia/About Media Viewer|Media Viewer]] will be released to this wiki in the coming weeks. Media Viewer allows readers of Wikimedia projects to have an enhanced view of files without having to visit the file page, but with more detail than a thumbnail. You can try Media Viewer out now by turning it on in your [[Special:Preferences#mw-prefsection-betafeatures|Beta Features]]. If you do not enjoy Media Viewer or if it interferes with your work after it is turned on you will be able to disable Media Viewer as well in your [[Special:Preferences#mw-prefsection-rendering|preferences]]. I invite you to [[mw:Talk:Multimedia/About Media Viewer|share what you think]] about Media Viewer and how it can be made better in the future.
Thank you for your time. - [[m:User:Keegan (WMF)|Keegan (WMF)]] ২১:২৯, ২৩ মে ২০১৪ (ইউটিসি)
<small>--This message was sent using [[m:MassMessage|MassMessage]]. Was there an error? [[m:Talk:MassMessage|Report it!]]</small>
</div>
<br />
<!-- Message sent by User:Keegan (WMF)@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=User:Keegan_(WMF)/MassMessage/Multimedia/Media_Viewer&oldid=8631315 -->
== Media Viewer is now live on this wiki ==
<br>
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:Media_Viewer_Desktop_-_Large_Image_Opaque_Info.png|thumb|Media Viewer lets you see images in larger size]]
Greetings— and sorry for writing in English, please translate if it will help your community,
The Wikimedia Foundation's [[mw:Multimedia|Multimedia team]] is happy to announce that [[mw:Multimedia/About Media Viewer|Media Viewer]] was just released on this site today.
Media Viewer displays images in larger size when you click on their thumbnails, to provide a better viewing experience. Users can now view images faster and more clearly, without having to jump to separate pages — and its user interface is more intuitive, offering easy access to full-resolution images and information, with links to the file repository for editing. The tool has been tested extensively across all Wikimedia wikis over the past six months as a [[Special:Preferences#mw-prefsection-betafeatures|Beta Feature]] and has been [[mw:Multimedia/Media_Viewer/Release_Plan#Timeline|released]] to the largest Wikipedias, all language Wikisources, and the English Wikivoyage already.
If you do not like this feature, you can easily turn it off by clicking on "Disable Media Viewer" at the bottom of the screen, pulling up the information panel (or in your [[Special:Preferences#mw-prefsection-rendering|your preferences]]) whether you have an account or not. Learn more [[mw:Help:Multimedia/Media_Viewer#How_can_I_turn_off_this_feature.3F|in this Media Viewer Help page]].
Please let us know if you have any questions or comments about Media Viewer. You are invited to [[mw:Talk:Multimedia/About_Media_Viewer|share your feedback in this discussion on MediaWiki.org]] in any language, to help improve this feature. You are also welcome to [https://www.surveymonkey.com/s/media-viewer-1-all?c=announce-all take this quick survey in English], [https://www.surveymonkey.com/s/media-viewer-1-fr en français], [https://www.surveymonkey.com/s/media-viewer-1-es o español].
We hope you enjoy Media Viewer. Many thanks to all the community members who helped make it possible. - [[mw:User:Fabrice Florin (WMF)|Fabrice Florin (WMF)]] ([[m:User talk:Fabrice Florin (WMF)|talk]]) ২১:৫৫, ১৯ জুন ২০১৪ (ইউটিসি)
<small>--This message was sent using [[m:MassMessage|MassMessage]]. Was there an error? [[m:Talk:MassMessage|Report it!]]</small>
</div>
<!-- Message sent by User:Keegan (WMF)@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=User:Keegan_(WMF)/MassMessage/Multimedia/Media_Viewer&oldid=8631315 -->
== Letter petitioning WMF to reverse recent decisions ==
The Wikimedia Foundation recently created a new feature, "superprotect" status. The purpose is to prevent pages from being edited by elected administrators -- but permitting WMF staff to edit them. It has been put to use in only one case: to protect the deployment of the Media Viewer software on German Wikipedia, in defiance of a clear decision of that community to disable the feature by default, unless users decide to enable it.
If you oppose these actions, please add your name to this letter. If you know non-Wikimedians who support our vision for the free sharing of knowledge, and would like to add their names to the list, please ask them to sign an identical version of the letter on change.org.
* [[:m:Letter to Wikimedia Foundation: Superprotect and Media Viewer|Letter to Wikimedia Foundation: Superprotect and Media Viewer]]
* [http://www.change.org/p/lila-tretikov-remove-new-superprotect-status-and-permit-wikipedia-communities-to-enact-current-software-decisions-uninhibited Letter on change.org]
-- [[:m:User:JurgenNL|JurgenNL]] ([[:m:User talk:JurgenNL|talk]]) ১৭:৩৫, ২১ আগস্ট ২০১৪ (ইউটিসি)
<!-- Message sent by User:JurgenNL@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=9313374 -->
== Process ideas for software development ==
<div class="mw-content-ltr">
''My apologies for writing in English.''
Hello,
I am notifying you that a brainstorming session has been [[:m:Community Engagement (Product)/Process ideas|started on Meta]] to help the Wikimedia Foundation increase and better affect community participation in software development across all wiki projects. Basically, how can you be more involved in helping to create features on Wikimedia projects? We are inviting all interested users to voice their ideas on how communities can be more involved and informed in the product development process at the Wikimedia Foundation. It would be very appreciated if you could translate this message to help inform your local communities as well.
I and the rest of [[:m:Community Engagement (Product)|my team]] welcome you to participate. We hope to see you on Meta.
Kind regards,
-- [[m:User:Rdicerb (WMF)|Rdicerb (WMF)]] [[m:User talk:Rdicerb (WMF)|talk]] ২২:১৫, ২১ আগস্ট ২০১৪ (ইউটিসি)
<small>--This message was sent using [[m:MassMessage|MassMessage]]. Was there an error? [[m:Talk:MassMessage|Report it!]]</small>
</div>
<!-- Message sent by User:Keegan (WMF)@metawiki using the list at http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=9313374 -->
== বাংলা উইকিপিডিয়া ফটোগ্রাফি কনটেস্ট ২০১৪ ==
বাংলা উইকিপিডিয়ার দশম প্রতিষ্ঠাবার্ষিকী উপলক্ষ্যে ১ সেপ্টেম্বর থেকে কমন্সে শুরু হয়েছে বাংলা উইকিপিডিয়া ফটোগ্রাফি কনটেস্ট ২০১৪।<br />
* যে কেউ প্রতিযোগিতায় অংশগ্রহণ করতে পারবেন এবং যত ইচ্ছা ছবি প্রতিযোগিতায় দিতে পারবেন।
* ছবি অবশ্যই নিজের তোলা হতে হবে এবং নিজে আপলোড করতে হবে<br />
এখানে বিস্তারিত দেখুন - '''[[:C:Commons:Bangla Wikipedia Photography Contest 2014/bn|বাংলা উইকিপিডিয়া ফটোগ্রাফি কনটেস্ট ২০১৪]]।'''--'''<span style="text-shadow:7px 7px 8px Black;">[[User:NahidSultan|<font face="Papyrus">যুদ্ধমন্ত্রী</font>]] <sup>[[User talk:NahidSultan#top|<font face="Papyrus">আলাপ</font>]]</sup></span>''' ২০:৩৩, ৩১ আগস্ট ২০১৪ (ইউটিসি)
== নতুন প্রকল্প উন্নয়নে অনুদান ==
শুভেচ্ছা! [[:m:Grants:IEG|ইন্ডিভিজ্যুয়াল এংগেজমেন্ট গ্র্যান্টস প্রোগ্রাম]] নতুন প্রকল্প কিংবা নতুন প্রস্তাবনার জন্য অনুদান প্রদান করবে। ১ সেপ্টেম্বর থেকে ৩০ সেপ্টেম্বর পর্যন্ত এ আবেদন গ্রহণ করা হবে। এ প্রস্তাবনা হতে পারে এমন একটি আইডিয়া, কোন টুলস কিংবা গ্যাজেট যা উইকিমিডিয়া প্রকল্প উন্নয়নে সহায়তা করবে, এমন একটি পদ্ধতি যা আপনার ভাষার উইকিপিডিয়া কমিউনিটিকে সহায়তা করবে, এমন একটি গবেষণা যা কোন একটা গুরুত্বপূর্ণ বিষয়ে হতে পারে অথবা এমন কিছু যা আমরা আগে কখনো ভাবিনি।
প্রকল্পের জন্য আপনার ২০০ ডলার থেকে ৩০,০০০ ডলার যাই প্রয়োজন হোক ইন্ডিভিজ্যুয়াল এংগেজমেন্ট গ্র্যান্টস সেটি দেবে এবং প্রয়োজনে এমন কাউকে নিয়োগ দেবে যে আপনার কাজে সহায়তা করবে।
*'''[[:m:Grants:IEG#ieg-apply|আপনার প্রস্তাবনা জমা দিন]]'''
*'''সাহায্য নিন''': [[:m:Grants:IdeaLab|আইডিয়া ল্যাব]] থেকে অথবা সামনে অনুষ্ঠিত [[:m:Grants:IdeaLab/Events#Upcoming_events|হ্যাংআউট পর্বে]] যোগ দিন। [[User:PEarley (WMF)|PEarley (WMF)]] ([[User talk:PEarley (WMF)|আলাপ]]) ১৪:৪৫, ৩ সেপ্টেম্বর ২০১৪ (ইউটিসি)
== Meta RfCs on two new global groups ==
<div lang="en" dir="ltr" class="mw-content-ltr">Hello all,
There are currently requests for comment open on meta to create two new global groups. The first is a group for members of the OTRS permissions queue, which would grant them autopatrolled rights on all wikis except those who opt-out. That proposal can be found at [[m:Requests for comment/Creation of a global OTRS-permissions user group]]. The second is a group for Wikimedia Commons admins and OTRS agents to view deleted file pages through the 'viewdeletedfile' right on all wikis except those who opt-out. The second proposal can be found at [[m:Requests for comment/Global file deletion review]].
We would like to hear what you think on both proposals. Both are in English; if you wanted to translate them into your native language that would also be appreciated.
It is possible for individual projects to opt-out, so that users in those groups do not have any additional rights on those projects. To do this please start a local discussion, and if there is consensus you can request to opt-out of either or both at [[m:Stewards' noticeboard]].
Thanks and regards, [[m:User:Ajraddatz|Ajraddatz]] ([[m:User talk:Ajraddatz|talk]]) ১৮:০৪, ২৬ অক্টোবর ২০১৪ (ইউটিসি)</div>
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=10024331-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Ajraddatz@metawiki পাঠিয়েছেন -->
== Global AbuseFilter ==
<div lang="en" dir="ltr" class="mw-content-ltr">Hello,
[[mw:Special:MyLanguage/Extension:AbuseFilter|AbuseFilter]] is a MediaWiki extension used to detect likely abusive behavior patterns, like pattern vandalism and spam. In 2013, [[m:Special:Mylanguage/Global AbuseFilter|Global AbuseFilters]] were enabled on a limited set of wikis including Meta-Wiki, MediaWiki.org, Wikispecies and (in early 2014) all the "[https://noc.wikimedia.org/conf/highlight.php?file=small.dblist small wikis]". Recently, global abuse filters were enabled on "[https://noc.wikimedia.org/conf/highlight.php?file=medium.dblist medium sized wikis]" as well. These filters are currently managed by stewards on Meta-Wiki and have shown to be very effective in preventing mass spam attacks across Wikimedia projects. However, there is currently no policy on how the global AbuseFilters will be managed although there are proposals. There is an ongoing [[m:Requests for comment/Global AbuseFilter|request for comment]] on policy governing the use of the global AbuseFilters. In the meantime, specific wikis can opt out of using the global AbuseFilter. These wikis can simply add a request to [[m:Global AbuseFilter/Opt-out wikis|this list]] on Meta-Wiki. More details can be found on [[m:Special:Mylanguage/Global AbuseFilter/2014 announcement|this page]] at Meta-Wiki. If you have any questions, feel free to ask on [[m:Talk:Global AbuseFilter|m:Talk:Global AbuseFilter]].
Thanks,
[[m:User:PiRSquared17|PiRSquared17]], [[m:User:Glaisher|Glaisher]]</div> — ১৭:৩৬, ১৪ নভেম্বর ২০১৪ (ইউটিসি)
<!-- http://meta.wikimedia.org/w/index.php?title=Global_AbuseFilter/2014_announcement_distribution_list&oldid=10495115-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Glaisher@metawiki পাঠিয়েছেন -->
== [Global proposal] m.{{SITENAME}}.org: {{int:group-all}} {{int:right-edit}} ==
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:Mediawiki-mobile-smartphone.png|thumb|MediaWiki mobile]]
Hi, this message is to let you know that, on domains like {{CONTENTLANGUAGE}}.'''m'''.wikipedia.org, '''unregistered users cannot edit'''. At the Wikimedia Forum, where global configuration changes are normally discussed, a few dozens users [[m:Wikimedia Forum#Proposal: restore normal editing permissions on all mobile sites|propose to restore normal editing permissions on all mobile sites]]. Please read and comment!
Thanks and sorry for writing in English, [[m:User:Nemo_bis|Nemo]] ২২:৩৩, ১ মার্চ ২০১৫ (ইউটিসি)
</div>
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=11428885-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Nemo bis@metawiki পাঠিয়েছেন -->
== [[ব্যবহারকারী:AftabBot|AftabBot]] অনুমোদনের অনুরোধ ==
আমি একটি বট চালাতে চাচ্ছি। বাংলা উইকিঅভিধানে বট অনুমোদনের কোন বুরোক্র্যাট নেই। তাই এখানে লেখা।
[[বিশেষ:Contributions/AftabBot|অবদান]] • [//toolserver.org/%7Etparis/count/index.php?name=AftabBot&lang=bn&wiki=wikipedia সম্পাদনা সংখ্যা] • [[sulutil:AftabBot|গ্লোবাল সম্পাদনা সংখ্যা]] • [[বিশেষ:Log/AftabBot|লগ]] • [[বিশেষ:Blockip/AftabBot|বাধা দান]] • [//bn.wiktionary.org/w/index.php?title=%E0%A6%AC%E0%A6%BF%E0%A6%B6%E0%A7%87%E0%A6%B7:Log/block&page=User:AftabBot বাধাদানের লগ] • [//bn.wiktionary.org/w/index.php?title=%E0%A6%AC%E0%A6%BF%E0%A6%B6%E0%A7%87%E0%A6%B7:Log/rights&page=User:AftabBot অধিকার লগ] • [[বিশেষ:Userrights/AftabBot|ফ্ল্যাগ অনুমোদন]]
* '''নাম''': [[ব্যবহারকারী:AftabBot|AftabBot]]
* '''অপারেটর''': [[User:Aftabuzzaman|Aftabuzzaman]]
* '''কাজ''': ব্যবহারকারীদের স্বাগতম জানানো ও অন্যান্য রক্ষণাবেক্ষণ (Welcoming new users, other maintenance)
* '''প্রোগ্রামিং ল্যাংগুয়েজ''': পাইথন (pywikipedia) & AWB
* '''সম্পাদনার মোড''': SemiAuto (অর্ধস্বয়ংক্রিয়)
* '''সম্পাদনার হার''': ৬টি (প্রতি মিনিটে)
* '''বিস্তারিত''': ব্যবহারকারীদের স্বাগতম জানানো। পাতা পরিষ্কারকরণ, বিষয়শ্রেণী ঠিক করা ইত্যাদি। --[[User:Aftabuzzaman|Aftabuzzaman]] ([[User talk:Aftabuzzaman|আলাপ]]) ১৮:০৪, ৭ মার্চ ২০১৫ (ইউটিসি)
#:{{agree}} [[User:Aftabuzzaman|আফতাব]],অন্যান্য উইকিগুলির রক্ষণাবেক্ষণের ক্ষেত্রে এই [[ব্যবহারকারী:AftabBot|বটটির]] অবদান খুবই ভাল। আমার মতে উইকিঅভিধানের উন্নতিসাধনেও এটি সহায়ক হবে। তবে ব্যবহারকারীদের "অটোমেটেড স্বাগতম বার্তা" বিষয়টি আমার পছন্দ নয়। এত ছোট একটি উইকির নতুন ব্যবহারকারীদের নিজেদেরই স্বাগতম বার্তা জানানো উচিত নয় কি?--[[User:Sujay25|সুজয় চন্দ্র]] ([[User talk:Sujay25|আলাপ]]) ১১:৪৯, ১৫ মার্চ ২০১৫ (ইউটিসি)
== SUL finalization update ==
<div class="mw-content-ltr">
Hi all, apologies for writing in English, please read [[m:Single_User_Login_finalisation_announcement/Schema_announcement|this page]] for important information and an update involving [[m:Help:Unified login|SUL finalization]], scheduled to take place in one month. Thanks. [[m:User:Keegan (WMF)|Keegan (WMF)]] ([[m:User talk:Keegan (WMF)|talk]]) ১৯:৪৫, ১৩ মার্চ ২০১৫ (ইউটিসি)
</div>
<!-- http://meta.wikimedia.org/w/index.php?title=User:Keegan_(WMF)/Everyone_but_meta_and_de&oldid=11538208-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Keegan (WMF)@metawiki পাঠিয়েছেন -->
== প্রশাসক হওয়ার আবেদন ==
গত মাসে উইকিঅভিধানে [[উইকিঅভিধান:প্রশাসক হওয়ার আবেদন/Moheen Reeyad|প্রশাসক হওয়ার আবেদন]] জানিয়েছিলাম। একজন প্রশাসক এসে নিরপেক্ষ ভূমিকা রেখে চলে গেল। মেয়াদোত্তীর্ণ হয়ে যাবার পরও আবেদনটি সফল/ব্যর্থ কোন তালিকায় এখনো সংগৃহীত হয় নি। এ বিষয়ে মতামত জানানোটা কী প্রশাসকদের দ্বায়িত্বের মধ্যে পড়ে নি? --[[User:Moheen Reeyad|মহীন রীয়াদ]] ([[User talk:Moheen Reeyad|আলাপ]]) ১৭:৫৩, ১৪ মার্চ ২০১৫ (ইউটিসি)
== Stewards confirmation rules ==
Hello, I made [[:m:Requests_for_comment/Confirmation_of_stewards|a proposal on Meta]] to change the rules for the steward confirmations. Currently consensus to remove is required for a steward to lose his status, however I think it's fairer to the community if every steward needed the consensus to keep. As this is an issue that affects all WMF wikis, I'm sending this notification to let people know & be able to participate. Best regards, --<small>[[User:MF-Warburg|MF-W]]</small> ১৬:১২, ১০ এপ্রিল ২০১৫ (ইউটিসি)
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=11737694-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:MF-Warburg@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Call for candidates|Nominations are being accepted for 2015 Wikimedia Foundation elections]] ==
''This is a message from the [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]. [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Accepting nominations|Translations]] are available.''
[[File:Wikimedia Foundation logo - vertical (2012-2016).svg|100px|right]]
Greetings,
I am pleased to announce that nominations are now being accepted for the 2015 Wikimedia Foundation Elections. This year the Board and the FDC Staff are looking for a diverse set of candidates from regions and projects that are traditionally under-represented on the board and in the movement as well as candidates with experience in technology, product or finance. To this end they have [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Call for candidates|published letters]] describing what they think is needed and, recognizing that those who know the community the best are the community themselves, the election committee is [[m:Special:MyLanguage/Wikimedia Foundation elections 2015|accepting nominations]] for community members you think should run and will reach out to those nominated to provide them with information about the job and the election process.
This year, elections are being held for the following roles:
''Board of Trustees''<br/>
The Board of Trustees is the decision-making body that is ultimately responsible for the long term sustainability of the Foundation, so we value wide input into its selection. There are three positions being filled. More information about this role can be found at [[m:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|the board elections page]].
''Funds Dissemination Committee (FDC)''<br/>
The Funds Dissemination Committee (FDC) makes recommendations about how to allocate Wikimedia movement funds to eligible entities. There are five positions being filled. More information about this role can be found at [[m:Special:MyLanguage/Wikimedia Foundation elections/FDC elections/2015|the FDC elections page]].
''Funds Dissemination Committee (FDC) Ombud''<br/>
The FDC Ombud receives complaints and feedback about the FDC process, investigates complaints at the request of the Board of Trustees, and summarizes the investigations and feedback for the Board of Trustees on an annual basis. One position is being filled. More information about this role can be found at [[m:Special:MyLanguage/Wikimedia Foundation elections/FDC Ombudsperson elections/2015|the FDC Ombudsperson elections page]].
The candidacy submission phase lasts from 00:00 UTC April 20 to 23:59 UTC May 5 for the Board and from 00:00 UTC April 20 to 23:59 UTC April 30 for the FDC and FDC Ombudsperson. This year, we are accepting both self-nominations and nominations of others. More information on this election and the nomination process can be found on [[m:Special:MyLanguage/Wikimedia Foundation elections 2015|the 2015 Wikimedia elections page on Meta-Wiki]].
Please feel free to post a note about the election on your project's village pump. Any questions related to the election can be posted on the talk page on Meta, or sent to the election committee's mailing list, board-elections -at- wikimedia.org
On behalf of the Elections Committee,<br/>
-Gregory Varnum ([[m:User:Varnent|User:Varnent]])<br/>
Coordinator, [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]
''Posted by the [[m:User:MediaWiki message delivery|MediaWiki message delivery]] on behalf of the [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]], 05:03, 21 April 2015 (UTC) • [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Accepting nominations|Translate]] • [[m:Talk:Wikimedia Foundation elections 2015|Get help]]''
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=11918510-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Varnent@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/FDC voting has begun|Wikimedia Foundation Funds Dissemination Committee elections 2015]] ==
[[File:Wikimedia Foundation RGB logo with text.svg|right|75px|link=m:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/FDC voting has begun]]
''This is a message from the [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]. [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/FDC voting has begun|Translations]] are available.''
[[m:Special:SecurePoll/vote/336|Voting has begun]] for [[m:Wikimedia Foundation elections 2015#Requirements|eligible voters]] in the 2015 elections for the ''[[m:Special:MyLanguage/Wikimedia Foundation elections/FDC elections/2015|Funds Dissemination Committee]]'' (FDC) and ''[[m:Special:MyLanguage/Wikimedia Foundation elections/FDC Ombudsperson elections/2015|FDC Ombudsperson]]''. Questions and discussion with the candidates for the ''[[m:Special:MyLanguage/Wikimedia Foundation elections/FDC elections/2015/Questions|Funds Dissemination Committee]]'' (FDC) and ''[[m:Special:MyLanguage/Wikimedia Foundation elections/FDC Ombudsperson elections/2015/Questions|FDC Ombudsperson]]'' will continue during the voting. Nominations for the ''[[m:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|Board of Trustees]]'' will be accepted until 23:59 UTC May 5.
The ''[[m:Special:MyLanguage/Grants:APG/Funds Dissemination Committee|Funds Dissemination Committee]]'' (FDC) makes recommendations about how to allocate Wikimedia movement funds to eligible entities. There are five positions on the committee being filled.
The ''[[m:Special:MyLanguage/Grants:APG/Funds Dissemination Committee/Ombudsperson role, expectations, and selection process|FDC Ombudsperson]]'' receives complaints and feedback about the FDC process, investigates complaints at the request of the [[m:Special:MyLanguage/Wikimedia Foundation Board of Trustees|Board of Trustees]], and summarizes the investigations and feedback for the Board of Trustees on an annual basis. One position is being filled.
The voting phase lasts from 00:00 UTC May 3 to 23:59 UTC May 10. '''[[m:Special:SecurePoll/vote/336|Click here to vote]].''' Questions and discussion with the candidates will continue during that time. '''[[m:Special:MyLanguage/Wikimedia Foundation elections/FDC elections/2015/Questions|Click here to ask the FDC candidates a question]]. [[m:Special:MyLanguage/Wikimedia Foundation elections/FDC Ombudsperson elections/2015/Questions|Click here to ask the FDC Ombudsperson candidates a question]].''' More information on the candidates and the elections can be found on the [[m:Special:MyLanguage/Wikimedia Foundation elections/FDC elections/2015|2015 FDC election page]], the [[m:Special:MyLanguage/Wikimedia Foundation elections/FDC Ombudsperson elections/2015|2015 FDC Ombudsperson election page]], and the [[m:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|2015 Board election page]] on Meta-Wiki.
On behalf of the Elections Committee,<br/>
-Gregory Varnum ([[m:User:Varnent|User:Varnent]])<br/>
Volunteer Coordinator, [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]
''Posted by the [[m:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] 03:45, 4 May 2015 (UTC) • [[m:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/FDC voting has begun|Translate]] • [[m:Talk:Wikimedia Foundation elections 2015|Get help]]''
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12082785-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Varnent@metawiki পাঠিয়েছেন -->
== [https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/339?setlang=bn Wikimedia Foundation Board of Trustees elections 2015] ==
[[File:Wikimedia Foundation logo - vertical (2012-2016).svg|right|100px|link=metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Board voting has begun]]
''This is a message from the [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]. [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Board voting has begun|Translations]] are available.''
[https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/339?setlang=bn Voting has begun] for [[metawiki:Wikimedia Foundation elections 2015#Requirements|eligible voters]] in the 2015 elections for the ''[[metawiki:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|Wikimedia Foundation Board of Trustees]]''. Questions and discussion with the candidates for the ''[[metawiki:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015/Questions|Board]]'' will continue during the voting.
The ''[[metawiki:Wikimedia Foundation Board of Trustees|Wikimedia Foundation Board of Trustees]]'' is the ultimate governing authority of the Wikimedia Foundation, a 501(c)(3) non-profit organization registered in the United States. The Wikimedia Foundation manages many diverse projects such as Wikipedia and Commons.
The voting phase lasts from 00:00 UTC May 17 to 23:59 UTC May 31. '''[https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/339?setlang=bn Click here to vote].''' More information on the candidates and the elections can be found on the [[metawiki:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|2015 ''Board'' election page]] on Meta-Wiki.
On behalf of the Elections Committee,<br/>
-Gregory Varnum ([[metawiki:User:Varnent|User:Varnent]])<br/>
Volunteer Coordinator, [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]
''Posted by the [[metawiki:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] 17:20, 17 May 2015 (UTC) • [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Board voting has begun|Translate]] • [[metawiki:Talk:Wikimedia Foundation elections 2015|Get help]]''
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12206621-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Varnent@metawiki পাঠিয়েছেন -->
== [https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/339?setlang=bn Wikimedia Foundation Board of Trustees elections 2015] ==
[[File:Wikimedia Foundation logo - vertical (2012-2016).svg|right|100px|link=metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Board voting has begun]]
''This is a message from the [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]. [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Board voting has begun|Translations]] are available.''
[https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/339?setlang=bn Voting has begun] for [[metawiki:Wikimedia Foundation elections 2015#Requirements|eligible voters]] in the 2015 elections for the ''[[metawiki:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|Wikimedia Foundation Board of Trustees]]''. Questions and discussion with the candidates for the ''[[metawiki:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015/Questions|Board]]'' will continue during the voting.
The ''[[metawiki:Wikimedia Foundation Board of Trustees|Wikimedia Foundation Board of Trustees]]'' is the ultimate governing authority of the Wikimedia Foundation, a 501(c)(3) non-profit organization registered in the United States. The Wikimedia Foundation manages many diverse projects such as Wikipedia and Commons.
The voting phase lasts from 00:00 UTC May 17 to 23:59 UTC May 31. '''[https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/339?setlang=bn Click here to vote].''' More information on the candidates and the elections can be found on the [[metawiki:Special:MyLanguage/Wikimedia Foundation elections/Board elections/2015|2015 ''Board'' election page]] on Meta-Wiki.
On behalf of the Elections Committee,<br/>
-Gregory Varnum ([[metawiki:User:Varnent|User:Varnent]])<br/>
Volunteer Coordinator, [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/Committee|2015 Wikimedia Foundation Elections Committee]]
''Posted by the [[metawiki:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] 17:20, 17 May 2015 (UTC) • [[metawiki:Special:MyLanguage/Wikimedia Foundation elections 2015/MassMessages/Board voting has begun|Translate]] • [[metawiki:Talk:Wikimedia Foundation elections 2015|Get help]]''
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12206621-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Varnent@metawiki পাঠিয়েছেন -->
== [[ব্যবহারকারী:Yann]] ==
Hi, Please delete my page, so that [[m:User:Yann]] is shown. Thanks, [[User:Yann|Yann]] ([[User talk:Yann|আলাপ]]) ০৯:২২, ২৮ মে ২০১৫ (ইউটিসি)
::Done.--[[User:Sujay25|সুজয় চন্দ্র]] ([[User talk:Sujay25|আলাপ]]) ১৩:০৭, ২৮ মে ২০১৫ (ইউটিসি)
== Pywikibot compat will no longer be supported - Please migrate to pywikibot core ==
<div lang="en" dir="ltr" class="mw-content-ltr">
<small>Sorry for English, I hope someone translates this.</small><br />
[[mw:Special:MyLanguage/Manual:Pywikibot|Pywikibot]] (then "Pywikipediabot") was started back in 2002. In 2007 a new branch (formerly known as "rewrite", now called "core") was started from scratch using the MediaWiki API. The developers of Pywikibot have decided to stop supporting the compat version of Pywikibot due to bad performance and architectural errors that make it hard to update, compared to core. If you are using pywikibot compat it is likely your code will break due to upcoming MediaWiki API changes (e.g. [[phab:T101524|T101524]]). It is highly recommended you migrate to the core framework. There is a [[mw:Manual:Pywikibot/Compat deprecation|migration guide]], and please [[mw:Special:MyLanguage/Manual:Pywikibot/Communication|contact us]] if you have any problem.
There is an upcoming MediaWiki API breaking change that compat will not be updated for. If your bot's name is in [https://lists.wikimedia.org/pipermail/wikitech-l/2015-June/081931.html this list], your bot will most likely break.
Thank you,<br />
The Pywikibot development team, 19:30, 5 June 2015 (UTC)
</div>
<!-- http://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12271740-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Ladsgroup@metawiki পাঠিয়েছেন -->
== HTTPS ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Apologies for writing in English.
Hi everyone.
Over the last few years, the Wikimedia Foundation has [http://blog.wikimedia.org/2013/08/01/future-https-wikimedia-projects/ been working] towards enabling [[m:Special:MyLanguage/HTTPS|HTTPS]] by default for all users, including unregistered ones, for better privacy and security for both readers and editors. This has taken a long time, as there were different aspects to take into account. Our servers haven't been ready to handle it. The Wikimedia Foundation has had to balance sometimes conflicting goals.
[https://blog.wikimedia.org/2015/06/12/securing-wikimedia-sites-with-https/ Forced HTTPS] has just been implemented on all Wikimedia projects. Some of you might already be aware of this, as a few Wikipedia language versions were converted to HTTPS last week and the then affected communities were notified.
Most of Wikimedia editors shouldn't be affected at all. If you edit as registered user, you've probably already had to log in through HTTPS. We'll keep an eye on this to make sure everything is working as it should. Do get in touch with [[:m:HTTPS#Help!|us]] if you have any problems after this change or contact me if you have any other questions.
/[[:m:User:Johan (WMF)|Johan (WMF)]]
</div> ২২:০০, ১৯ জুন ২০১৫ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:Johan_(WMF)/HTTPS_global_message_delivery&oldid=12471979-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== HTTPS ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Apologies for writing in English.
Hi everyone.
Over the last few years, the Wikimedia Foundation has [http://blog.wikimedia.org/2013/08/01/future-https-wikimedia-projects/ been working] towards enabling [[m:Special:MyLanguage/HTTPS|HTTPS]] by default for all users, including unregistered ones, for better privacy and security for both readers and editors. This has taken a long time, as there were different aspects to take into account. Our servers haven't been ready to handle it. The Wikimedia Foundation has had to balance sometimes conflicting goals.
[https://blog.wikimedia.org/2015/06/12/securing-wikimedia-sites-with-https/ Forced HTTPS] has just been implemented on all Wikimedia projects. Some of you might already be aware of this, as a few Wikipedia language versions were converted to HTTPS last week and the then affected communities were notified.
Most of Wikimedia editors shouldn't be affected at all. If you edit as registered user, you've probably already had to log in through HTTPS. We'll keep an eye on this to make sure everything is working as it should. Do get in touch with [[:m:HTTPS#Help!|us]] if you have any problems after this change or contact me if you have any other questions.
/[[:m:User:Johan (WMF)|Johan (WMF)]]
</div> ০২:০৫, ২০ জুন ২০১৫ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:Johan_(WMF)/HTTPS_global_message_delivery&oldid=12471979-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Proposal to create PNG thumbnails of static GIF images ==
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:(R)-3-phenyl-cyclohexanone.gif|255px|thumb|The thumbnail of this gif is of really bad quality.]]
[[File:(R)-3-phenyl-cyclohanone.png|255px|thumb|How a PNG thumb of this GIF would look like]]
There is a [[w:c:Commons:Village_pump/Proposals#Create_PNG_thumbnails_of_static_GIF_images|proposal]] at the Commons Village Pump requesting feedback about the thumbnails of static GIF images: It states that static GIF files should have their thumbnails created in PNG. The advantages of PNG over GIF would be visible especially with GIF images using an alpha channel. (compare the thumbnails on the side)
This change would affect all wikis, so if you support/oppose or want to give general feedback/concerns, please post them to the [[w:c:Commons:Village_pump/Proposals#Create_PNG_thumbnails_of_static_GIF_images|proposal page]]. Thank you. --[[w:c:User:McZusatz|McZusatz]] ([[w:c:User talk:McZusatz|talk]]) & [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ০৫:০৭, ২৪ জুলাই ২০১৫ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12485605-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:-revi@metawiki পাঠিয়েছেন -->
== Proposal to create PNG thumbnails of static GIF images ==
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:(R)-3-phenyl-cyclohexanone.gif|255px|thumb|The thumbnail of this gif is of really bad quality.]]
[[File:(R)-3-phenyl-cyclohanone.png|255px|thumb|How a PNG thumb of this GIF would look like]]
There is a [[w:c:Commons:Village_pump/Proposals#Create_PNG_thumbnails_of_static_GIF_images|proposal]] at the Commons Village Pump requesting feedback about the thumbnails of static GIF images: It states that static GIF files should have their thumbnails created in PNG. The advantages of PNG over GIF would be visible especially with GIF images using an alpha channel. (compare the thumbnails on the side)
This change would affect all wikis, so if you support/oppose or want to give general feedback/concerns, please post them to the [[w:c:Commons:Village_pump/Proposals#Create_PNG_thumbnails_of_static_GIF_images|proposal page]]. Thank you. --[[w:c:User:McZusatz|McZusatz]] ([[w:c:User talk:McZusatz|talk]]) & [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ০৭:২০, ২৪ জুলাই ২০১৫ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12485605-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:-revi@metawiki পাঠিয়েছেন -->
== What does a Healthy Community look like to you? ==
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:Community Health Cover art News portal.png|300px|right]]
Hi, <br>
The Community Engagement department at the Wikimedia Foundation has launched a new learning campaign. The WMF wants to record community impressions about what makes a healthy online community.
Share your views and/or create a drawing and take a chance to win a Wikimania 2016 scholarship!
Join the WMF as we begin a conversation about Community Health. Contribute a drawing or answer the questions [[meta:Grants:Evaluation/Community Health learning campaign|on the campaign's page.]]
=== Why get involved? ===
'''The world is changing. The way we relate to knowledge is transforming.''' As the next billion people come online, the Wikimedia movement is working to bring more users on the wiki projects. The way we interact and collaborate online are key to building sustainable projects. How accessible are Wikimedia projects to newcomers today? Are we helping each other learn?
<br/>
Share your views on this matter that affects us all!
<br>
'''We invite everyone to take part in this learning campaign. Wikimedia Foundation will distribute one Wikimania Scholarship 2016 among those participants who are eligible.'''
=== More information ===
* All participants must have a registered user of at least one month antiquity on any Wikimedia project before the starting date of the campaign.
* <span style="border-bottom:1px dotted"> All eligible contributions must be done until '''August 23, 2015 at <nowiki>23:59</nowiki> UTC''' </span>
* <big> Wiki link: '''[[meta:Grants:Evaluation/Community Health learning campaign|Community Health learning campaign]]''' </big>
* URL https://meta.wikimedia.org/wiki/Grants:Evaluation/Community_Health_learning_campaign
* Contact: [[meta:user:MCruz (WMF)|María Cruz]] / Twitter: {{@}}WikiEval #CommunityHealth / email: eval{{@}}wikimedia{{dot}}org
<br>
Happy editing!
<br>
<br>
[[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ২৩:৪২, ৩১ জুলাই ২০১৫ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12909005-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:MCruz (WMF)@metawiki পাঠিয়েছেন -->
== What does a Healthy Community look like to you? ==
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:Community Health Cover art News portal.png|300px|right]]
Hi, <br>
The Community Engagement department at the Wikimedia Foundation has launched a new learning campaign. The WMF wants to record community impressions about what makes a healthy online community.
Share your views and/or create a drawing and take a chance to win a Wikimania 2016 scholarship!
Join the WMF as we begin a conversation about Community Health. Contribute a drawing or answer the questions [[meta:Grants:Evaluation/Community Health learning campaign|on the campaign's page.]]
=== Why get involved? ===
'''The world is changing. The way we relate to knowledge is transforming.''' As the next billion people come online, the Wikimedia movement is working to bring more users on the wiki projects. The way we interact and collaborate online are key to building sustainable projects. How accessible are Wikimedia projects to newcomers today? Are we helping each other learn?
<br/>
Share your views on this matter that affects us all!
<br>
'''We invite everyone to take part in this learning campaign. Wikimedia Foundation will distribute one Wikimania Scholarship 2016 among those participants who are eligible.'''
=== More information ===
* All participants must have a registered user of at least one month antiquity on any Wikimedia project before the starting date of the campaign.
* <span style="border-bottom:1px dotted"> All eligible contributions must be done until '''August 23, 2015 at <nowiki>23:59</nowiki> UTC''' </span>
* <big> Wiki link: '''[[meta:Grants:Evaluation/Community Health learning campaign|Community Health learning campaign]]''' </big>
* URL https://meta.wikimedia.org/wiki/Grants:Evaluation/Community_Health_learning_campaign
* Contact: [[meta:user:MCruz (WMF)|María Cruz]] / Twitter: {{@}}WikiEval #CommunityHealth / email: eval{{@}}wikimedia{{dot}}org
<br>
Happy editing!
<br>
<br>
[[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ০০:৫৯, ১ আগস্ট ২০১৫ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=12909005-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:MCruz (WMF)@metawiki পাঠিয়েছেন -->
== উইকিকনফারেন্স ইন্ডিয়া ২০১৬ - স্বেচ্ছাসেবক আহ্বান ==
<div style="margin: 0.5em; border: 1px black solid; padding: 1em;background-color:#E3F0F4" >
{| style="border:1px black solid; padding:2em; border-collapse:collapse; width:100%;"
|-
! style="background-color:#FAFAFA; color:black; padding-left:2em; padding-top:.5em;" align=left |
সুধী,
২০১১ খ্রিস্টাব্দে [[:meta:WikiConference India 2011|ভারতের প্রথম জাতীয় স্তরের উইকিকনফারেন্স]] অনুষ্ঠিত হয়। ২০০১ খ্রিস্টাব্দে উইকিপিডিয়া শুরু হওয়ার পর থেকে দীর্ঘ ১৪ বছরে সেটিই ছিল একমাত্র সর্বভারতীয় স্তরের উইকিমিডিয়া সম্মেলন।
২০১৫ খ্রিস্টাব্দের জুলাই মাসে মেক্সিকো সিটিতে অনুষ্ঠিত [https://wikimania2015.wikimedia.org/wiki/Wikimania উইকিম্যানিয়া ২০১৫] আন্তর্জাতিক কনফারেন্সে [https://wikimania2015.wikimedia.org/wiki/Indic_Meetup ভারতীয় ভাষাগুলির উইকিপিডিয়ানদের মধ্যে একটি আলোচনাসভা] থেকে '''[[:meta:WikiConference India 2016|উইকিকনফারেন্স ইন্ডিয়া ২০১৬]]''' সংগঠিত করার পরিকল্পনা করা হয়। এই সভায় ভারতের পক্ষ থেকে উইকিম্যানিয়া ২০১৯ আন্তর্জাতিক কনফারেন্সের দরপত্র পাঠানোর সম্মিলিত সিদ্ধান্ত নেওযা হয়েছে। ভারতে উইকিম্যানিয়া সফল ভাবে পরিচালনা করতে গেলে, ভারতের সকল ভাষার উইকিমিডিয়া সম্প্রদায়কে একত্রিত হয়ে এক দল হিসেবে কাজ করতে হবে। সেই লক্ষ্য পূরণের জন্য প্রথম পদক্ষেপ হিসেবে '''[[:meta:WikiConference India 2016|উইকিকনফারেন্স ইন্ডিয়া ২০১৬]]''' একটি আদর্শ স্থান। এই সম্মেলনের ফলে আমরা আমদের শক্তি ও দুর্বলতা যাচাই করে উইকিম্যানিয়া ২০১৯ আন্তর্জাতিক কনফারেন্সের জন্য তৈরী হতে পারব। আমাদের বৈচিত্রের মধ্যে একতার ওপর নির্ভর করেই এই সম্মেলন সফল হবে বলে আশা রাখি। আমরা সকল ভারতীয় ভাষার সম্প্রদায়ের স্বেচ্ছাসেবকদের তাঁদের অদম্য সম্মিলিত ইচ্ছা, শক্তি ও একতা উর্দ্ধে তুলে ধরে এই লক্ষ্যপূরণের আহ্বান জানাই।
এই সম্মেলনে যোগ দিতে অনুগ্রহ করে [https://docs.google.com/forms/d/1R9skceycTFRpXs9pJASXGoWgZo0ZVwjnEAtPdlQ_EU8/viewform?usp=send_form এই ফর্ম] পূর্ণ করুন।
ধন্যবাদান্তে,
উইকিকনফারেন্স ইন্ডিয়া ২০১৬ স্বেচ্ছাসেবকবৃন্দ
|}</div>
== আইআরসি চ্যানেল ==
সাম্প্রতিক পরিবর্তন পাতাতে দুটি আইআরসি চ্যানেল রয়েছে, অন্যভাবে নেবেন না কিন্তু ভারতের জন্য আলাদা উইকিঅভিধানের চ্যানেল কেন? এটিতো বাংলা (bn) প্রজেক্ট দুই বাংলা মিলিয়েই!--'''<span style="text-shadow:7px 7px 8px Black;">[[User:NahidSultan|<font face="Papyrus">যুদ্ধমন্ত্রী</font>]] <sup>[[User talk:NahidSultan#top|<font face="Papyrus">আলাপ</font>]]</sup></span>''' ২২:৫৯, ৩ আগস্ট ২০১৫ (ইউটিসি)
:আমি এটা সম্পর্কে অবগত নই [[User:NahidSultan|নাহিদ]] ভাই। আমিও এটা দেখেছি কিন্তু, আমি জানি না কেন। আমার জানামতে বাংলা উইকির সম্প্রসারণে দুই বাংলা একসাথে কাজ করছে। [[User:Sujay25|সুজয়]]দা এটা একটু দেখুন। --'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ১৪:২৫, ৪ আগস্ট ২০১৫ (ইউটিসি)
::লিংক সরিয়ে দিয়েছি।--[[User:Sujay25|সুজয় চন্দ্র]] ([[User talk:Sujay25|আলাপ]]) ১৬:৪২, ৫ আগস্ট ২০১৫ (ইউটিসি)
== Introducing the Wikimedia public policy site ==
Hi all,
We are excited to introduce a new Wikimedia Public Policy site. The site includes resources and position statements on access, copyright, censorship, intermediary liability, and privacy. The site explains how good public policy supports the Wikimedia projects, editors, and mission.
Visit the public policy portal: https://policy.wikimedia.org/
Please help translate the [[m:Public policy|statements on Meta Wiki]]. You can [http://blog.wikimedia.org/2015/09/02/new-wikimedia-public-policy-site/ read more on the Wikimedia blog].
Thanks,
[[m:User:YWelinder (WMF)|Yana]] and [[m:User:Slaporte (WMF)|Stephen]] ([[m:User talk:Slaporte (WMF)|Talk]]) ১৮:১২, ২ সেপ্টেম্বর ২০১৫ (ইউটিসি)
''(Sent with the [[m:MassMessage#Global_message_delivery|Global message delivery system]])''
<!-- https://meta.wikimedia.org/w/index.php?title=User:Slaporte_(WMF)/Announcing_public_policy_site&oldid=13439030-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Slaporte (WMF)@metawiki পাঠিয়েছেন -->
== Open call for Individual Engagement Grants ==
''My apologies for posting this message in English. Please help translate it if you can.''
Greetings! The '''[[m:IEG|Individual Engagement Grants program]] is accepting proposals''' until September 29th to fund new tools, community-building processes, and other experimental ideas that enhance the work of Wikimedia volunteers. Whether you need a small or large amount of funds (up to $30,000 USD), Individual Engagement Grants can support you and your team’s project development time in addition to project expenses such as materials, travel, and rental space.
*[[m:Grants:IEG#ieg-apply|'''Submit''' a grant request]]
*[[m:Grants:IdeaLab|'''Get help''' with your proposal in IdeaLab]] or [[m:Grants:IdeaLab/Events#Upcoming_events|an upcoming Hangout session]]
*[[m:Grants:IEG#ieg-engaging|'''Learn from examples''' of completed Individual Engagement Grants]]
Thanks,
[[m:User:I JethroBT (WMF)|I JethroBT (WMF)]], [[m:Community Resources|Community Resources]], Wikimedia Foundation. ২০:৫২, ৪ সেপ্টেম্বর ২০১৫ (ইউটিসি)
([[m:User:I JethroBT (WMF)/IEG 2015 Targets|''Opt-out Instructions'']]) <small>This message was sent by [[m:User:I JethroBT (WMF)|I JethroBT (WMF)]] ([[m:User talk:I JethroBT (WMF)|talk]]) through [[m:User:MediaWiki message delivery|MediaWiki message delivery]].</small>
<!-- https://meta.wikimedia.org/w/index.php?title=User:I_JethroBT_(WMF)/IEG_2015_Targets&oldid=13476366-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== উইকিমিডিয়া হাইলাইটস, আগস্ট ২০১৫ ==
<div style="margin-top:10px; font-size:90%; padding-left:5px; font-family:Georgia, Palatino, Palatino Linotype, Times, Times New Roman, serif;">'''''[https://blog.wikimedia.org উইকিমিডিয়া ব্লগ]''' হতে আগস্ট ২০১৫ এর [[m:Special:MyLanguage/Wikimedia Highlights|হাইলাইটস]] সমূহ।''</div>[[File:Wikimedia_Foundation_RGB_logo_with_text.svg|80px|right]]
*[[m:Wikimedia_Highlights,_August_2015/bn#অনুসন্ধানের পর কয়েকশত 'ব্লাক হ্যাট' ইংরেজি উইকিপিডিয়া এ্যাকাউন্ট ব্লক করা হয়েছে।|অনুসন্ধানের পর কয়েকশত 'ব্লাক হ্যাট' ইংরেজি উইকিপিডিয়া এ্যাকাউন্ট ব্লক করা হয়েছে।]]
*[[m:Wikimedia_Highlights,_August_2015/bn#ট্রিপটিজ শিকারের খোঁজে ।|ট্রিপটিজ শিকারের খোঁজে ।]]
*[[m:Wikimedia_Highlights,_August_2015/bn#আমার জীবন একটি অটিসটিক উইকিপিঠিয়ানের।|আমার জীবন একটি অটিসটিক উইকিপিঠিয়ানের।]]
*[[m:Wikimedia_Highlights,_August_2015/bn#উইকিমিডিয়া ২০১৫ হতে বিষয়বস্তুর হালনাগাত অনুবাদ।|উইকিমিডিয়া ২০১৫ হতে বিষয়বস্তুর হালনাগাত অনুবাদ।]]
*[[m:Wikimedia_Highlights,_August_2015/bn#উইকিপিডিয়া ব্যবহার করে কলোম্বিয়ার আদিবাসী ভাষাগুলোর সংরক্ষণ।|উইকিপিডিয়া ব্যবহার করে কলোম্বিয়ার আদিবাসী ভাষাগুলোর সংরক্ষণ।]]
*[[m:Wikimedia_Highlights,_August_2015/bn#যখন সাংস্কৃতিক ঐতিহ্য পায় ডিজিটাল জীবন।|যখন সাংস্কৃতিক ঐতিহ্য পায় ডিজিটাল জীবন।]]
<div style="margin-top:10px; font-size:90%; padding-left:5px; font-family:Georgia, Palatino, Palatino Linotype, Times, Times New Roman, serif;">'''[[m:Special:MyLanguage/Wikimedia Highlights|About]]''' · [[m:Global message delivery/Targets/Wikimedia Highlights|Subscribe]] · <small>Distributed via [[m:Special:Mylanguage/MassMessage|MassMessage]] (wrong page? [[m:Distribution list/Global message delivery|Correct it here]])</small>, ২১:৩৭, ১৬ সেপ্টেম্বর ২০১৫ (ইউটিসি)</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery/bn&oldid=13664693-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:ASherman (WMF)@metawiki পাঠিয়েছেন -->
== Only one week left for Individual Engagement Grant proposals! ==
(Apologies for using English below, please help translate if you are able.)
'''There is still one week left to submit [[m:IEG|Individual Engagement Grant]] (IEG) proposals''' before the September 29th deadline. If you have ideas for new tools, community-building processes, and other experimental projects that enhance the work of Wikimedia volunteers, start your proposal today! Please encourage others who have great ideas to apply as well. Support is available if you want help turning your idea into a grant request.
*[[m:Grants:IEG#ieg-apply|'''Submit''' a grant request]]
*[[m:Grants:IdeaLab|'''Get help''' with your proposal in IdeaLab]]
*[[m:Grants:IEG#ieg-engaging|'''Learn from examples''' of completed Individual Engagement Grants]]
[[m:User:I JethroBT (WMF)|I JethroBT (WMF)]], [[m:Community Resources|Community Resources]] ২১:০১, ২২ সেপ্টেম্বর ২০১৫ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:I_JethroBT_(WMF)/IEG_2015_Targets&oldid=13754911-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== Reimagining WMF grants report ==
''(My apologies for using English here, please help translate if you are able.)''
Last month, we asked for community feedback on [[m:Grants:IdeaLab/Reimagining WMF grants| a proposal to change the structure of WMF grant programs]]. Thanks to the 200+ people who participated! '''[[m:Grants:IdeaLab/Reimagining_WMF_grants/Outcomes|
A report]]''' on what we learned and changed based on this consultation is now available.
Come read about the findings and next steps as WMF’s Community Resources team begins to implement changes based on your feedback. Your questions and comments are welcome on [[m:Grants talk:IdeaLab/Reimagining WMF grants/Outcomes|the outcomes discussion page]].
With thanks, [[m:User:I JethroBT (WMF)|I JethroBT (WMF)]] ১৬:৫৬, ২৮ সেপ্টেম্বর ২০১৫ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Grants:IdeaLab/Reimagining_WMF_grants/ProjectTargets&oldid=13850666-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== Reimagining WMF grants report ==
''(My apologies for using English here, please help translate if you are able.)''
Last month, we asked for community feedback on [[m:Grants:IdeaLab/Reimagining WMF grants| a proposal to change the structure of WMF grant programs]]. Thanks to the 200+ people who participated! '''[[m:Grants:IdeaLab/Reimagining_WMF_grants/Outcomes|
A report]]''' on what we learned and changed based on this consultation is now available.
Come read about the findings and next steps as WMF’s Community Resources team begins to implement changes based on your feedback. Your questions and comments are welcome on [[m:Grants talk:IdeaLab/Reimagining WMF grants/Outcomes|the outcomes discussion page]].
With thanks, [[m:User:I JethroBT (WMF)|I JethroBT (WMF)]] ১৮:৩৩, ২৮ সেপ্টেম্বর ২০১৫ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Grants:IdeaLab/Reimagining_WMF_grants/ProjectTargets&oldid=13850666-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== Community Wishlist Survey ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hi everyone! Apologies for posting in English. Translations are very welcome.
The [[:m:Community Tech|Community Tech team]] at the Wikimedia Foundation is focused on building improved curation and moderation tools for experienced Wikimedia contributors. We're now starting a '''[[:m:2015 Community Wishlist Survey|Community Wishlist Survey]]''' to find the most useful projects that we can work on.
For phase 1 of the survey, we're inviting all active contributors to submit brief proposals, explaining the project that you'd like us to work on, and why it's important. Phase 1 will last for 2 weeks. In phase 2, we'll ask you to vote on the proposals. Afterwards, we'll analyze the top 10 proposals and create a prioritized wishlist.
While most of this process will be conducted in English, we're inviting people from any Wikimedia wiki to submit proposals. We'll also invite volunteer translators to help translate proposals into English.
Your proposal should include: the problem that you want to solve, who would benefit, and a proposed solution, if you have one. You can submit your proposal on the Community Wishlist Survey page, using the entry field and the big blue button. We will be accepting proposals for 2 weeks, ending on November 23.
We're looking forward to hearing your ideas!
</div> <div lang="en" dir="ltr" class="mw-content-ltr">Community Tech Team via [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ২১:৫৮, ৯ নভেম্বর ২০১৫ (ইউটিসি)</div>
<!-- https://meta.wikimedia.org/w/index.php?title=User:Johan_(WMF)/Target_lists/Global_distribution&oldid=14554458-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Wikimania 2016 scholarships ambassadors needed ==
<div lang="en" dir="ltr" class="mw-content-ltr">
Hello! [[wm2016:|Wikimania 2016]] scholarships will soon be open; by the end of the week we'll form the committee and we need your help, see [[wm2016:Special:MyLanguage/Scholarship committee|Scholarship committee]] for details.
If you want to carefully review nearly a thousand applications in January, you might be a perfect committee member. Otherwise, you can '''volunteer as "ambassador"''': you will observe all the committee activities, ensure that people from your language or project manage to apply for a scholarship, translate '''scholarship applications written in your language''' to English and so on. Ambassadors are allowed to ask for a scholarship, unlike committee members.
[[wm2016:Scholarship committee|Wikimania 2016 scholarships subteam]] ১০:৪৭, ১০ নভেম্বর ২০১৫ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=14347818-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Nemo bis@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Free Bassel/MassMessages/2015 Free Bassel banner straw poll|Your input requested on the proposed #FreeBassel banner campaign]] ==
''This is a message regarding the [[:m:Special:MyLanguage/Free Bassel/Banner|proposed 2015 Free Bassel banner]]. [[m:Special:MyLanguage/Free Bassel/MassMessages/2015 Free Bassel banner straw poll|Translations]] are available.''
Hi everyone,
This is to inform all Wikimedia contributors that a [[:m:Special:MyLanguage/Free Bassel/Banner/Straw poll|straw poll seeking your involvement]] has just been started on Meta-Wiki.
As some of your might be aware, a small group of Wikimedia volunteers have proposed a banner campaign informing Wikipedia readers about the urgent situation of our fellow Wikipedian, open source software developer and Creative Commons activist, [[:w:Bassel Khartabil|Bassel Khartabil]]. An exemplary [[:m:Special:MyLanguage/Free Bassel/Banner|banner]] and an [[:m:Special:MyLanguage/Free Bassel/Banner|explanatory page]] have now been prepared, and translated into about half a dozen languages by volunteer translators.
We are seeking [[:m:Special:MyLanguage/Free Bassel/Banner/Straw poll|your involvement to decide]] if the global Wikimedia community approves starting a banner campaign asking Wikipedia readers to call on the Syrian government to release Bassel from prison. We understand that a campaign like this would be unprecedented in Wikipedia's history, which is why we're seeking the widest possible consensus among the community.
Given Bassel's urgent situation and the resulting tight schedule, we ask everyone to [[:m:Special:MyLanguage/Free Bassel/Banner/Straw poll|get involved with the poll and the discussion]] to the widest possible extent, and to promote it among your communities as soon as possible.
(Apologies for writing in English; please kindly [[m:Special:MyLanguage/Free Bassel/MassMessages/2015 Free Bassel banner straw poll|translate]] this message into your own language.)
Thank you for your participation!
''Posted by the [[:m:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] 21:47, 25 November 2015 (UTC) • [[m:Special:MyLanguage/Free Bassel/MassMessages/2015 Free Bassel banner straw poll|Translate]] • [[:m:Talk:Free Bassel/Banner|Get help]]''
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=14758733-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Varnent@metawiki পাঠিয়েছেন -->
== Community Wishlist Survey ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hi everyone! Apologies for posting this in English. Translations are very welcome.
We're beginning the second part of the Community Tech team's '''[[:m:2015 Community Wishlist Survey/Voting|Community Wishlist Survey]]''', and we're inviting all active contributors to vote on the proposals that have been submitted.
Thanks to you and other Wikimedia contributors, 111 proposals were submitted to the team. We've split the proposals into categories, and now it's time to vote! You can vote for any proposal listed on the pages, using the <nowiki>{{Support}}</nowiki> tag. Feel free to add comments pro or con, but only support votes will be counted. The voting period will be 2 weeks, ending on December 14.
The proposals with the most support votes will be the team's top priority backlog to investigate and address. Thank you for participating, and we're looking forward to hearing what you think!
Community Tech via
</div> [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ১৪:৩৮, ১ ডিসেম্বর ২০১৫ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:Johan_(WMF)/Target_lists/Global_distribution&oldid=14913494-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Wikipedia 15|Get involved in Wikipedia 15!]] ==
<div lang="en" dir="ltr" class="mw-content-ltr">
''This is a message from the [[m:Special:MyLanguage/Wikimedia Foundation|Wikimedia Foundation]]. [[m:Special:MyLanguage/Wikipedia 15/MassMessages/Get involved|Translations]] are available.''
[[File:International-Space-Station wordmark blue.svg|right|200px]]
As many of you know, January 15 is Wikipedia’s 15th Birthday!
People around the world are getting involved in the celebration and have started adding their [[m:Special:MyLanguage/Wikipedia 15/Events|events on Meta Page]]. While we are celebrating Wikipedia's birthday, we hope that all projects and affiliates will be able to utilize this celebration to raise awareness of our community's efforts.
Haven’t started planning? Don’t worry, there’s lots of ways to get involved. Here are some ideas:
* '''[[m:Special:MyLanguage/Wikipedia 15/Events|Join/host an event]]'''. We already have more than 80, and hope to have many more.
* '''[[m:Special:MyLanguage/Wikipedia 15/Media|Talk to local press]]'''. In the past 15 years, Wikipedia has accomplished extraordinary things. We’ve made a [[m:Special:MyLanguage/Wikipedia 15/15 years|handy summary]] of milestones and encourage you to add your own. More resources, including a [[m:Special:MyLanguage/Wikipedia 15/Media#releases|press release template]] and [[m:Special:MyLanguage/Communications/Movement Communications Skills|resources on working with the media]], are also available.
* '''[[m:Special:MyLanguage/Wikipedia 15/Material|Design a Wikipedia 15 logo]]'''. In place of a single icon for Wikipedia 15, we’re making dozens. Add your own with something fun and representative of your community. Just use the visual guide so they share a common sensibility.
* '''[[m:Special:MyLanguage/Wikipedia 15/Events/Package#birthdaywish|Share a message on social media]]'''. Tell the world what Wikipedia means to you, and add #wikipedia15 to the post. We might re-tweet or share your message!
Everything is linked on the [[m:Special:MyLanguage/Wikipedia 15|Wikipedia 15 Meta page]]. You’ll find a set of ten data visualization works that you can show at your events, and a [[c:Category:Wikipedia15 Mark|list of all the Wikipedia 15 logos]] that community members have already designed.
If you have any questions, please contact [[m:User:ZMcCune (WMF)|Zachary McCune]] or [[m:User:JSutherland (WMF)|Joe Sutherland]].
Thanks and Happy nearly Wikipedia 15!<br />
-The Wikimedia Foundation Communications team
''Posted by the [[m:User:MediaWiki message delivery|MediaWiki message delivery]], ২০:৫৮, ১৮ ডিসেম্বর ২০১৫ (ইউটিসি) • [[m:Wikipedia 15/MassMessages/Get involved|{{int:please-translate}}]] • [[m:Talk:Wikipedia 15|{{int:help}}]]''
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=15158198-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== 2016 WMF Strategy consultation ==
:{{int:Please-translate}}
Hello, all.
The Wikimedia Foundation (WMF) has launched a consultation to help create and prioritize WMF strategy beginning July 2016 and for the 12 to 24 months thereafter. This consultation will be open, on Meta, from 18 January to 26 February, after which the Foundation will also use these ideas to help inform its Annual Plan. (More on our timeline can be found on that Meta page.)
Your input is welcome (and greatly desired) at the Meta discussion, [[:m:2016 Strategy/Community consultation|2016 Strategy/Community consultation]].
Apologies for English, where this is posted on a non-English project. We thought it was more important to get the consultation translated as much as possible, and good headway has been made there in some languages. There is still much to do, however! We created [[:m:2016 Strategy/Translations]] to try to help coordinate what needs translation and what progress is being made. :)
If you have questions, please reach out to me on my talk page or on the strategy consultation's talk page or by email to mdennis@wikimedia.org.
I hope you'll join us! [[:m:User:Mdennis (WMF)|Maggie Dennis]] via [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ১৯:০৭, ১৮ জানুয়ারি ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:PEarley_(WMF)/Mass_Message_-_large&oldid=15253743-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Mdennis (WMF)@metawiki পাঠিয়েছেন -->
== Open Call for Individual Engagement Grants ==
[[File:IEG barnstar 2.png|right|100px]]
{{int:Please-translate}}:
Greetings! The '''[[m:Special:MyLanguage/IEG|Individual Engagement Grants (IEG) program]] is accepting proposals''' until April 12th to fund new tools, research, outreach efforts, and other experiments that enhance the work of Wikimedia volunteers.
Whether you need a small or large amount of funds (up to $30,000 USD), IEGs can support you and your team’s project development time in addition to project expenses such as materials, travel, and rental space.
*[[m:Special:MyLanguage/Grants:IEG#ieg-apply|'''Submit''' a grant request]] or [[m:Special:MyLanguage/Grants:IdeaLab|'''draft''' your proposal]] in IdeaLab
*[[m:Special:MyLanguage/Grants:IdeaLab/Events#Upcoming_events|'''Get help''' with your proposal]] in an upcoming Hangout session
*[[m:Special:MyLanguage/Grants:IEG#ieg-engaging|'''Learn from examples''' of completed Individual Engagement Grants]]
With thanks, [[m:User:I JethroBT (WMF)|I JethroBT (WMF)]] ১৬:৪৮, ৩১ মার্চ ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:I_JethroBT_(WMF)/IEG_2015_Targets&oldid=15490024-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== No editing two times this week ==
[[foundation:|উইকিমিডিয়া ফাউন্ডেশন]] ডালাসে অবস্থিত তার নতুন উপাত্ত কেন্দ্রটি পরীক্ষা করবে। এটি উইকিপিডিয়া এবং উইকিমিডিয়ার অন্যান্য উইকিসমূহকে একটি দুর্যোগের পরেও অনলাইনে থাকা নিশ্চিত করবে। সবকিছু কাজ করছে তা নিশ্চিত করতে, উইকিমিডিয়ার প্রযুক্তি বিভাগ একটি পরিকল্পিত পরীক্ষা পরিচালনা করবে। এই পরীক্ষাটি প্রদর্শন করবে যে তাঁদের নির্ভরযোগ্যভাবে একটি উপাত্ত কেন্দ্র থেকে অন্য উপাত্ত কেন্দ্রে পরিবর্তন করা যাবে কিনা। এটির জন্য অনেক দলকে পরীক্ষার জন্য প্রস্তুত করা প্রয়োজন এবং অপ্রত্যাশিত সমস্যা সমাধানের জন্য প্রস্তুত থাকা প্রয়োজন।
তারা নতুন উপাত্ত কেন্দ্রে '''১৯ এপ্রিল, মঙ্গলবারে''' সকল ট্রাফিক নিয়ে যাবে।<br> '''২১ এপ্রিল, বৃহস্পতিবারে''', তাঁরা আবার প্রাথমিক উপাত্ত কেন্দ্রে ফিরে আসবে।
দুর্ভাগ্যবশত, [[mw:Manual:What is MediaWiki?|মিডিয়াউইকির]] কিছু সীমাবদ্ধতার কারণে, ঐ দুটি পরিবর্তনের সময় সব সম্পাদনা অবশ্যই বন্ধ রাখতে হবে। এই ব্যাঘাত ঘটানোর জন্য আমরা ক্ষমাপ্রার্থী, এবং আমরা ভবিষ্যতে এটিকে হ্রাস করার জন্য কাজ করছি।
'''সব উইকিতে অল্প সময়ের জন্য, আপনি পড়তে পারবেন, কিন্তু সম্পাদনা করতে পারবেন না।'''
*মঙ্গলবার, ১৯ এপ্রিল এবং বৃহস্পতিবার, ২১ এপ্রিল বাংলাদেশ সময় রাত ৮টা থেকে (১৪:০০ ইউটিসি, ১৬:০০ সিইএসটি) আনুমানিক পরবর্তী ১৫ থেকে ৩০ মিনিটের জন্য আপনি সম্পাদনা করতে পারবেন না।
*এই সময়ে আপনি যদি সম্পাদনা বা সংরক্ষণ করার চেষ্টা করেন, তাহলে আপনি একটি ত্রুটির বার্তা দেখতে পাবেন। আমরা আশা করি যে কোন সম্পাদনা এই সময়ের মধ্যে নষ্ট হবে না, কিন্তু আমরা তার নিশ্চয়তা দিতে পারছি না। আপনি যদি ত্রুটির বার্তা দেখতে পান, তাহলে অনুগ্রহ করে অপেক্ষা করুন যতক্ষণ না সবকিছু স্বাভাবিক অবস্থায় ফিরে আসছে। এরপর আপনি আপনার সম্পাদনা সংরক্ষণ করতে সক্ষম হবেন। কিন্তু, আমরা আপনাকে প্রথমে আপনার পরিবর্তনের একটি অনুলিপি করে রাখতে সুপারিশ করছি।
''অন্যান্য প্রভাব'':
*পটভূমির কাজ ধীর হবে এবং কিছু নাও কাজ করতে পারে। লাল লিঙ্ক স্বাভাবিকের মত দ্রুত হালনাগাদ নাও হতে পারে। আপনি যদি একটি নিবন্ধ তৈরি করেন যা ইতিমধ্যে অন্য কোথাও সংযুক্ত আছে, সেক্ষেত্রে লিংক স্বাভাবিকের চেয়ে বেশি সময় ধরে লাল থাকবে। কিছু দীর্ঘ চলমান স্ক্রিপ্ট বন্ধ করতে হবে।
*১৮ এপ্রিলের সপ্তাহের সময়কালীন একটি কোড বাধাদান থাকবে। কোন অ-অপরিহার্য কোড স্থাপন সঞ্চালিত হবে না।
এই পরীক্ষাটি মূলত ২২ মার্চ করতে পরিকল্পনা করা হয়েছিল। ১৯ এবং ২১ এপ্রিল হচ্ছে নতুন তারিখ। আপনি [[wikitech:Switch Datacenter#Schedule for Q3 FY2015-2016 rollout|wikitech.wikimedia.org তে সময়সূচী পড়তে পারেন]]। সময়সূচীতে কোন পরিবর্তন হবে তাঁরা আপনাকে জানাবে। এই সম্পর্কে আরো বিজ্ঞপ্তি দেয়া হবে। '''দয়া করে আপনার সম্প্রদায়কে এই তথ্যটি জানান।''' /[[User:Johan (WMF)|Johan (WMF)]] ([[User talk:Johan (WMF)|আলাপ]]) ২১:১৩, ১৭ এপ্রিল ২০১৬ (ইউটিসি)
== উইকিমিডিয়া সারসংক্ষেপ, এপ্রিল ২০১৬ ==
<div style="margin-top:10px; font-size:90%; padding-left:5px; font-family:Georgia, Palatino, Palatino Linotype, Times, Times New Roman, serif;">''এপ্রিল ২০১৬-এর '''[https://blog.wikimedia.org Wikimedia blog]''' এর [[m:Special:MyLanguage/Wikimedia Highlights|সারসংক্ষেপ]] এখানে দেখুন।''</div>[[File:Wikimedia_Foundation_RGB_logo_with_text.svg|80px|right]]
*[[m:Wikimedia_Highlights,_April_2016/bn#খুঁজুন, গুরুত্ব নির্ধারণ করুন ও সুপারিশ করুন: উইকিপিডিয়ার বিভিন্ন ক্ষেত্রে জ্ঞানের শূণ্যস্থান পূরণ করার উদ্দেশ্যে সৃষ্ট নিবন্ধ সুপারিশকরণ ব্যবস্থা|খুঁজুন, গুরুত্ব নির্ধারণ করুন ও সুপারিশ করুন: উইকিপিডিয়ার বিভিন্ন ক্ষেত্রে জ্ঞানের শূণ্যস্থান পূরণ করার উদ্দেশ্যে সৃষ্ট নিবন্ধ সুপারিশকরণ ব্যবস্থা]]
*[[m:Wikimedia_Highlights,_April_2016/bn#ইন্ডিয়ান ইনস্টিটিউট অফ টেকনোলজি রুরকিতে অনুষ্ঠিত সর্বপ্রথম উইকিমিডিয়া হ্যাকাথন|ইন্ডিয়ান ইনস্টিটিউট অফ টেকনোলজি রুরকিতে অনুষ্ঠিত সর্বপ্রথম উইকিমিডিয়া হ্যাকাথন]]
*[[m:Wikimedia_Highlights,_April_2016/bn#প্যানোরামার স্বাধীনতার বিরুদ্ধে পদক্ষেপ: সুইডেনের আদালত উইকিমিডিয়া স্ভাইয়ের বিরুদ্ধে রায় দিল|প্যানোরামার স্বাধীনতার বিরুদ্ধে পদক্ষেপ: সুইডেনের আদালত উইকিমিডিয়া স্ভাইয়ের বিরুদ্ধে রায় দিল]]
*[[m:Wikimedia_Highlights,_April_2016/bn#সংক্ষেপ|সংক্ষেপ]]
<div style="margin-top:10px; font-size:90%; padding-left:5px; font-family:Georgia, Palatino, Palatino Linotype, Times, Times New Roman, serif;">'''[[m:Special:MyLanguage/Wikimedia Highlights|About]]''' · [[m:Global message delivery/Targets/Wikimedia Highlights|Subscribe]] · <small>Distributed via [[m:Special:Mylanguage/MassMessage|MassMessage]] (wrong page? [[m:Distribution list/Global message delivery|Correct it here]])</small>, ১৯:৩৪, ২৪ মে ২০১৬ (ইউটিসি)</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery/bn&oldid=13664693-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:ASherman (WMF)@metawiki পাঠিয়েছেন -->
== প্রধান পাতায় সামাজিক যোগাযোগ যুক্তকরণ প্রসঙ্গে ==
সুধী, আপনারা সকলেই জানেন বর্তমানে সামাজিক যোগাযোগের মাধ্যমও প্রচারনার ক্ষেত্রে অনেক বেশি কার্যকরী। আমি উইকিঅভিধানের প্রধান পাতায় আমাদের সামাজিক যোগাযোগের মাধ্যমগুলো যুক্ত করার প্রস্তাব করছি। ইতিমধ্যে বাংলা উইকিপিডিয়ায় আলোচনার মাধ্যমে এটি যুক্ত করা হয়েছে। আমি নতুন একটি টেমপ্লেট তৈরি করেছি '''{{tl|প্রধান পাতা সামাজিক যোগাযোগ মাধ্যম}}'''। ধন্যবাদ। আপনাদের মতামত কাম্য।--'''<span style="text-shadow:7px 7px 8px Black;">[[User:NahidSultan|<font face="Papyrus">যুদ্ধমন্ত্রী</font>]] <sup>[[User talk:NahidSultan#top|<font face="Papyrus">আলাপ</font>]]</sup></span>''' ১৬:১২, ৭ জুন ২০১৬ (ইউটিসি)
:আমার পূর্ণ {{সমর্থন}} রয়েছে। --'''[[ব্যবহারকারী:Pratyya Ghosh|<span style="color:green;font-family:Verdana">প্র<font color="red">ত্য</font><font color="blue">য়</font></span>]]''' [[ব্যবহারকারী আলাপ:Pratyya Ghosh|<span style="color:orange;font-family:Verdana">(স্বাগতম)</span>]] ১৩:১৬, ২৯ জুন ২০১৬ (ইউটিসি)
== Compact Links coming soon to this wiki ==
{{int:Please-translate}}
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:Compact-language-links-list.png|thumb|Screenshot of Compact Language Links interlanguage list]]
Hello, I wanted to give a heads up about an upcoming feature for this wiki which you may seen already in the [[:m:Tech/News/2016/25|Tech News]]. [[:mw:Universal_Language_Selector/Compact_Language_Links|Compact Language Links]] has been available as a beta-feature on all Wikimedia wikis since 2014. With compact language links enabled, users are shown a much shorter list of languages on the interlanguage link section of an article (see image). This will be enabled as a feature in the soon for all users, which can be turned on or off using a preference setting. We look forward to your feedback and please do let us know if you have any questions. Details about Compact Language Links can be read in the [[:mw:Universal_Language_Selector/Compact_Language_Links|project documentation]].
Due to the large scale enablement of this feature, we have had to use [[:m:Global_message_delivery|MassMessage]] for this announcement and as a result it is only written in English. We will really appreciate if this message can be translated for other users of this wiki. The main announcement can also be translated on [[:mw:Universal_Language_Selector/Compact_Language_Links/Announcement_draft_June_2016|this page]]. Thank you. On behalf of the Wikimedia Language team: [[:mw:User:Runab_WMF|Runa Bhattacharjee (WMF)]] ([[mw:User talk:Runab_WMF|talk]])-১৩:০৬, ২৯ জুন ২০১৬ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/ULS_Compact_Links/1_July&oldid=15730563-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Runab WMF@metawiki পাঠিয়েছেন -->
== Compact Language Links enabled in this wiki today ==
{{int:Please-translate}}
<div lang="en" dir="ltr" class="mw-content-ltr">
[[File:Compact-language-links-list.png|thumb|Screenshot of Compact Language Links interlanguage list]]
[[:mw:Universal_Language_Selector/Compact_Language_Links|Compact Language Links]] has been available as a beta-feature on all Wikimedia wikis since 2014. With compact language links enabled, users are shown a much shorter list of languages on the interlanguage link section of an article (see image). Based on several factors, this shorter list of languages is expected to be more relevant for them and valuable for finding similar content in a language known to them. More information about compact language links can be found in [[:mw:Universal_Language_Selector/Compact_Language_Links|the documentation]].
From today onwards, compact language links has been enabled as the default listing of interlanguage links on this wiki. However, using the button at the bottom, you will be able to see a longer list of all the languages the article has been written in. The setting for this compact list can be changed by using the checkbox under ''User Preferences -> Appearance -> Languages''
The compact language links feature has been tested extensively by the Wikimedia Language team, which developed it. However, in case there are any problems or other feedback please let us know on the [[:mw:Talk:Universal_Language_Selector/Compact_Language_Links|project talk page]]. It is to be noted that on some wikis the presence of an existing older gadget that was used for a similar purpose may cause an interference for compact language list. We would like to bring this to the attention of the admins of this wiki. Full details are on [[phab:T131455|this phabricator ticket]] (in English).
Due to the large scale enablement of this feature, we have had to use [[:m:Global_message_delivery|MassMessage]] for this announcement and as a result it is only written in English. We will really appreciate if this message can be translated for other users of this wiki. Thank you. On behalf of the Wikimedia Language team: [[:mw:User:Runab_WMF|Runa Bhattacharjee (WMF)]] ([[mw:User talk:Runab_WMF|talk]])-০৩:০৫, ১ জুলাই ২০১৬ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/ULS_Compact_Links/1_July&oldid=15735887-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Runab WMF@metawiki পাঠিয়েছেন -->
== Open call for Project Grants ==
[[File:IEG barnstar 2.png|right|100px]]
{{int:Please-translate}}:
:Greetings! The '''[[m:Special:MyLanguage/Grants:Project|Project Grants program]] is accepting proposals''' from July 1st to August 2nd to fund new tools, research, offline outreach (including editathon series, workshops, etc), online organizing (including contests), and other experiments that enhance the work of Wikimedia volunteers.
:Whether you need a small or large amount of funds, Project Grants can support you and your team’s project development time in addition to project expenses such as materials, travel, and rental space.
:*[[m:Special:MyLanguage/Grants:Project/Apply|'''Submit''' a grant request]] or [[m:Special:MyLanguage/Grants:IdeaLab|'''draft''' your proposal]] in IdeaLab
:*[[m:Special:MyLanguage/Grants:IdeaLab/Events#Upcoming_events|'''Get help with your proposal''']] in an upcoming Hangout session
:*'''Learn from examples''' of completed [[m:Special:MyLanguage/Grants:IEG#ieg-engaging|Individual Engagement Grants]] or [[m:Special:MyLanguage/Grants:PEG/Requests#Grants_funded_by_the_WMF_in_FY_2015.E2.80.9316|Project and Event Grants]]
:Also accepting candidates to [[m:Special:MyLanguage/Grants:Project/Quarterly/Committee|join the Project Grants Committee through July 15.]]
:With thanks, [[m:User:I JethroBT (WMF)|I JethroBT (WMF)]] ১৫:২৫, ৫ জুলাই ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:I_JethroBT_(WMF)/IEG_2015_Targets&oldid=15504704-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== Save/Publish ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
The [[:mw:Editing|Editing]] team is planning to change the name of the [https://translatewiki.net/w/i.php?title=Special:Translations&namespace=8&message=Savearticle “<bdi>{{int:Savearticle}}</bdi>”] button to [https://translatewiki.net/w/i.php?title=Special:Translations&namespace=8&message=Publishpage “'''<bdi>{{int:Publishpage}}</bdi>'''”] and [https://translatewiki.net/w/i.php?title=Special:Translations&namespace=8&message=Publishchanges “'''<bdi>{{int:Publishchanges}}</bdi>'''”]. “<bdi>{{int:Publishpage}}</bdi>” will be used when you create a new page. “<bdi>{{int:Publishchanges}}</bdi>” will be used when you change an existing page. The names will be consistent in all editing environments.[https://phabricator.wikimedia.org/T131132][https://phabricator.wikimedia.org/T139033]
This change will probably happen during the week of 30 August 2016. The change will be announced in [[:m:Special:MyLanguage/Tech/News|Tech News]] when it happens.
If you are fluent in a language other than English, please check the status of translations at translatewiki.net for [https://translatewiki.net/w/i.php?title=Special:Translations&namespace=8&message=Publishpage “'''<bdi>{{int:Publishpage}}</bdi>'''”] and [https://translatewiki.net/w/i.php?title=Special:Translations&namespace=8&message=Publishchanges “'''<bdi>{{int:Publishchanges}}</bdi>'''”].
The main reason for this change is to avoid confusion for new editors. Repeated user research studies with new editors have shown that some new editors believed that [https://translatewiki.net/w/i.php?title=Special:Translations&namespace=8&message=Savearticle “<bdi>{{int:Savearticle}}</bdi>”] would save a private copy of a new page in their accounts, rather than permanently publishing their changes on the web. It is important for this part of the user interface to be clear, since it is difficult to remove public information after it is published. We believe that the confusion caused by the “<bdi>{{int:Savearticle}}</bdi>” button increases the workload for experienced editors, who have to clean up the information that people unintentionally disclose, and report it to the functionaries and stewards to suppress it. Clarifying what the button does will reduce this problem.
Beyond that, the goal is to make all the wikis and languages more consistent, and some wikis made this change many years ago. The [[:m:Legal|Legal team]] at the Wikimedia Foundation supports this change. Making the edit interface easier to understand will make it easier to handle licensing and privacy questions that may arise.
Any help pages or other basic documentation about how to edit pages will also need to be updated, on-wiki and elsewhere. On wiki pages, you can use the wikitext codes <code><nowiki>{{int:Publishpage}}</nowiki></code> and <code><nowiki>{{int:Publishchanges}}</nowiki></code> to display the new labels in the user's preferred language. For the language settings in [[Special:Preferences|your account preferences]], these wikitext codes produce “<bdi>{{int:Publishpage}}</bdi>” and “<bdi>{{int:Publishchanges}}</bdi>”.
Please share this news with community members who teach new editors and with others who may be interested.
</div> [[m:User:Whatamidoing (WMF)|Whatamidoing (WMF)]] ([[m:User talk:Whatamidoing (WMF)|talk]]) ১৮:০৩, ৯ আগস্ট ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=15790914-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Quiddity (WMF)@metawiki পাঠিয়েছেন -->
== RevisionSlider ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
From September 13th on, [[mw:Special:MyLanguage/Extension:RevisionSlider|RevisionSlider]] will be available as a [[mw:Special:MyLanguage/Beta Features|beta feature]] in your wiki. The RevisionSlider adds a slider view to the diff page, so that you can easily move between revisions. The feature fulfills a wish from the [[m:WMDE Technical Wishes|German Community’s Technical Wishlist]]. Everyone is invited to test the feature and we hope that it will serve you well in your work! </div> [[user:Birgit Müller (WMDE)|Birgit Müller (WMDE)]] ১৪:৫৬, ১২ সেপ্টেম্বর ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=WMDE_Technical_Wishes/Technical_Wishes_News_list_1&oldid=15903628-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Birgit Müller (WMDE)@metawiki পাঠিয়েছেন -->
== Wikidata for Wiktionary: let’s get ready for lexicographical data! ==
Hello all,
Sorry for writing this message in English, please feel free to translate it in your own language below.
The Wikidata development team will start working on integrating lexicographical data in the knowledge base soon and we want to make sure we do this together with you.
Wikidata is a constantly evolving project and after four years of existence, we start with implementing support for Wiktionary editors and content, by allowing you to store and improve lexicographical data, in addition to the concepts already maintained by thousands of editors on Wikidata.
We have been working on this idea for almost three years and improving it with a lot of inputs from community members to understand Wiktionary processes.
Starting this project, we hope that the editors will be able to collaborate across Wiktionaries more easily. We expect to increase the number of editors and visibility of languages, and we want to provide the groundwork for new tools for editors.
Our development plan contains several phases in order to build the structure to include lexicographical data:
* creating automatic interwiki links on Wiktionary,
* creating new entity types for lexemes, senses, and forms on Wikidata,
* providing data access to Wikidata from Wiktionary
* improving the display of lexicographical information on Wikidata.
During the next months, we will do our best to provide you the technical structure to store lexicographical data on Wikidata and use it on Wiktionary. Don’t hesitate to discuss this within your local community, and give us feedback about your needs and the particularities of your languages.
Information about supporting lexicographical entities on Wikidata is available [[d:Special:MyLanguage/Wikidata:Wiktionary|on this page]]. You can find an [https://upload.wikimedia.org/wikipedia/commons/6/60/Wikidata_for_Wiktionary_announcement.pdf overview of the project], [[d:Special:MyLanguage/Wikidata:Wiktionary/Development/Proposals/2015-05|the detail of the development plan]], answers to [[d:Special:MyLanguage/Wikidata:Wiktionary/FAQ|frequently asked questions]], and [[d:Special:MyLanguage/Wikidata:Wiktionary/How to help|a list]] of people ready to help us. If you want to have general discussions and questions about the project, please use [[d:Wikidata talk:Wiktionary|the general talk page]], as we won’t be able to follow all the talk pages on Wiktionaries. If you don’t feel comfortable with English, you can write in your own language and hopefully someone will volunteer to translate.
Best regards, [[:d:User:Lea Lacroix (WMDE)|Lea Lacroix (WMDE)]] ([[:d:User talk:Lea Lacroix (WMDE)|talk]])
<!-- https://meta.wikimedia.org/w/index.php?title=User:Lea_Lacroix_(WMDE)/Distribution_list&oldid=15905971-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== Grants to improve your project ==
''{{int:Please-translate}}:''
Greetings! The [[:m:Grants:Project|Project Grants program]] is currently accepting proposals for funding. There is just over a week left to submit before the October 11 deadline. If you have ideas for software, offline outreach, research, online community organizing, or other projects that enhance the work of Wikimedia volunteers, start your proposal today! Please encourage others who have great ideas to apply as well. Support is available if you want help turning your idea into a grant request.
*'''[[:m:Grants:Project/Apply|Submit a grant request]]'''
*'''Get help''': In [[:m:Grants:IdeaLab|IdeaLab]] or an upcoming [[:m:Grants:Project#Upcoming_events|Hangout session]]
*'''Learn from examples''' of completed [[:m:Grants:IEG#ieg-engaging|Individual Engagement Grants]] or [[:m:Grants:PEG/Requests#Grants_funded_by_the_WMF_in_FY_2015.E2.80.9316|Project and Event Grants]]
[[m:User:I JethroBT (WMF)|I JethroBT (WMF)]] ([[m:User talk:I JethroBT (WMF)|talk]]) ২০:১১, ৩০ সেপ্টেম্বর ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:I_JethroBT_(WMF)/IEG_2015_Targets&oldid=15939807-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:I JethroBT (WMF)@metawiki পাঠিয়েছেন -->
== Creative Commons 4.0 ==
Hello! I'm writing from the Wikimedia Foundation to invite you to give your feedback on a proposed move from CC BY-SA 3.0 to a CC BY-SA 4.0 license across all Wikimedia projects. The consultation will run from October 5 to November 8, and we hope to receive a wide range of viewpoints and opinions. Please, if you are interested, [[meta:Special:MyLanguage/Terms of use/Creative Commons 4.0|take part in the discussion on Meta-Wiki]].
''Apologies that this message is only in English. [[meta:Special:MyLanguage/Terms of use/Creative Commons 4.0/MassMessage|This message can be read and translated in more languages here]].'' [[User:JSutherland (WMF)|Joe Sutherland]] ([[User talk:JSutherland (WMF)|talk]]) ০১:৩৪, ৬ অক্টোবর ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:JSutherland_(WMF)/MassMessage/1&oldid=15962252-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:JSutherland (WMF)@metawiki পাঠিয়েছেন -->
== Password reset ==
''I apologise that this message is in English. [https://meta.wikimedia.org/w/index.php?title=Special:Translate&group=page-Security%2FPassword+reset&language=&action=page&filter= {{int:Centralnotice-shared-help-translate}}]''
We are having a problem with attackers taking over wiki accounts with privileged user rights (for example, admins, bureaucrats, oversighters, checkusers). It appears that this may be because of weak or reused passwords.
Community members are working along with members of multiple teams at the Wikimedia Foundation to address this issue.
In the meantime, we ask that everyone takes a look at the passwords they have chosen for their wiki accounts. If you know that you've chosen a weak password, or if you've chosen a password that you are using somewhere else, please change those passwords.
Select strong passwords – eight or more characters long, and containing letters, numbers, and punctuation. [[m:User:JSutherland (WMF)|Joe Sutherland]] ([[m:User talk:JSutherland (WMF)|{{int:Talkpagelinktext}}]]) / [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ২৩:৫৯, ১৩ নভেম্বর ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:JSutherland_(WMF)/MassMessage/1&oldid=16060701-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:JSutherland (WMF)@metawiki পাঠিয়েছেন -->
== Adding to the above section (Password reset) ==
Please accept my apologies - that first line should read "[https://meta.wikimedia.org/w/index.php?title=Special:Translate&group=page-Security%2FPassword+reset&language=&action=page&filter= Help with translations!]". [[m:User:JSutherland (WMF)|Joe Sutherland (WMF)]] ([[m:User talk:JSutherland (WMF)|talk]]) / [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ০০:১১, ১৪ নভেম্বর ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:JSutherland_(WMF)/MassMessage/1&oldid=16060701-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:JSutherland (WMF)@metawiki পাঠিয়েছেন -->
== New way to edit wikitext ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
'''Summary''': There's a new opt-in Beta Feature of a [[:mw:2017 wikitext editor|wikitext mode for the visual editor]]. Please [[Special:Preferences#mw-prefsection-betafeatures|go try it out]].
We in the Wikimedia Foundation's Editing department are responsible for making editing better for all our editors, new and experienced alike. We've been slowly improving [[:mw:VisualEditor|the visual editor]] based on feedback, user tests, and feature requests. However, that doesn't work for all our user needs: whether you need to edit a wikitext talk page, create a template, or fix some broken reference syntax, sometimes you need to use wikitext, and many experienced editors prefer it.
Consequently, we've planned a "wikitext mode" for the visual editor for a long time. It provides as much of the visual editor's features as possible, for those times that you need or want wikitext. It has the same user interface as the visual editor, including the same toolbar across the top with the same buttons. It provides access to the [[:mw:citoid|citoid service]] for formatting citations, integrated search options for inserting images, and the ability to add new templates in a simple dialog. Like in the visual editor, if you paste in formatted text copied from another page, then formatting (such as bolding) will automatically be converted into wikitext.
All wikis now have access to this mode as a [[:mw:Beta Features|Beta Feature]]. When enabled, it replaces your existing [[:mw:Editor|wikitext editor]] everywhere. If you don't like it, you can reverse this at any time by turning off the Beta Feature in your preferences. We don't want to surprise anyone, so it's strictly an ''opt-in-only'' Beta Feature. It won't switch on automatically for anyone, even if you have previously checked the box to "{{Int:Betafeatures-auto-enroll}}".
The new wikitext edit mode is based on the visual editor, so it requires JavaScript (as does the [[:mw:Extension:WikiEditor|current wikitext editor]]). It doesn't work with gadgets that have only been designed for the older one (and ''vice versa''), so some users will miss gadgets they find important. We're happy to [[:mw:VisualEditor/Gadgets|work with gadget authors to help them update their code to work]] with both editors. We're not planning to get rid of the current main wikitext editor on desktop in the foreseeable future. We're also not going to remove the existing ability to edit plain wikitext without JavaScript. Finally, though it should go without saying, if you prefer to continue using the current wikitext editor, then you may so do.
This is an early version, and we'd love to know what you think so we can make it better. Please leave feedback about the new mode [[:mw:2017 wikitext editor/Feedback|on the feedback page]]. You may write comments in any language. Thank you.
</div> [[:mw:User:Jdforrester (WMF)|James Forrester]] (Product Manager, Editing department, Wikimedia Foundation) --১৯:৩২, ১৪ ডিসেম্বর ২০১৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=15942009-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Elitre (WMF)@metawiki পাঠিয়েছেন -->
== Review of initial updates on Wikimedia movement strategy process ==
<div lang="en" dir="ltr" class="mw-content-ltr">
''Note: Apologies for cross-posting and sending in English. [[m:Strategy/Wikimedia movement/2017/Updates/Initial announcements review|Message is available for translation on Meta-Wiki]].''
The Wikimedia movement is beginning a movement-wide strategy discussion, a process which will run throughout 2017. For 15 years, Wikimedians have worked together to build the largest free knowledge resource in human history. During this time, we've grown from a small group of editors to a diverse network of editors, developers, affiliates, readers, donors, and partners. Today, we are more than a group of websites. We are a movement rooted in values and a powerful vision: all knowledge for all people. As a movement, we have an opportunity to decide where we go from here.
This movement strategy discussion will focus on the future of our movement: where we want to go together, and what we want to achieve. We hope to design an inclusive process that makes space for everyone: editors, community leaders, affiliates, developers, readers, donors, technology platforms, institutional partners, and people we have yet to reach. There will be multiple ways to participate including on-wiki, in private spaces, and in-person meetings. You are warmly invited to join and make your voice heard.
The immediate goal is to have a strategic direction by Wikimania 2017 to help frame a discussion on how we work together toward that strategic direction.
Regular updates are being sent to the [[mail:Wikimedia-l|Wikimedia-l mailing list]], and posted [[m:Strategy/Wikimedia_movement/2017/Updates|on Meta-Wiki]]. Beginning with this message, monthly reviews of these updates will be sent to this page as well. [[m:Strategy/Wikimedia movement/2017/Updates/Signup|Sign up]] to receive future announcements and monthly highlights of strategy updates on your user talk page.
Here is a review of the updates that have been sent so far:
* [[m:Strategy/Wikimedia movement/2017/Updates/15 December 2016 - Update 1 on Wikimedia movement strategy process|Update 1 on Wikimedia movement strategy process]] (15 December 2016)
** Introduction to process and information about budget spending resolution to support it
* [[m:Strategy/Wikimedia movement/2017/Updates/23 December 2016 - Update 2 on Wikimedia movement strategy process|Update 2 on Wikimedia movement strategy process]] (23 December 2016)
** Start of search for Lead Architect for movement strategy process
* [[m:Strategy/Wikimedia movement/2017/Updates/8 January 2017 - Update 3 on Wikimedia movement strategy process|Update 3 on Wikimedia movement strategy process]] (8 January 2017)
** Plans for strategy sessions at upcoming Wikimedia Conference 2017
* [[m:Strategy/Wikimedia movement/2017/Updates/11 January 2017 - Update 4 on Wikimedia movement strategy process|Update 4 on Wikimedia movement strategy process]] (11 January 2017)
** Introduction of williamsworks
* [[m:Strategy/Wikimedia movement/2017/Updates/2 February 2017 - Update 5 on Wikimedia movement strategy process|Update 5 on Wikimedia movement strategy process]] (2 February 2017)
** The core movement strategy team, team tracks being developed, introduction of the Community Process Steering Committee, discussions at WikiIndaba conference 2017 and the Wikimedia movement affiliates executive directors gathering in Switzerland
* [[m:Strategy/Wikimedia movement/2017/Updates/10 February 2017 - Update 6 on Wikimedia movement strategy process|Update 6 on Wikimedia movement strategy process]] (10 February 2017)
** Tracks A & B process prototypes and providing feedback, updates on development of all four Tracks
More information about the movement strategy is available on the [[m:Strategy/Wikimedia movement/2017|Meta-Wiki 2017 Wikimedia movement strategy portal]].
''Posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] on behalf of the [[m:Special:MyLanguage/Wikimedia Foundation|Wikimedia Foundation]], ২০:৩১, ১৫ ফেব্রুয়ারি ২০১৭ (ইউটিসি) • [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/Initial announcements review|{{int:please-translate}}]] • [[m:Talk:Strategy/Wikimedia movement/2017/Updates|Get help]]''
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=16297862-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== শীঘ্রই সব ভাষায় উইকিমিডিয়ার ভবিষ্যৎ আন্দোলন/কর্মপন্থার কৌশল নির্ধারণী আলোচনা শুরু হচ্ছে! ==
সুধী,<br>
শুভেচ্ছা নেবেন। গত ১৬ বছরে উইকিমিডিয়া আন্দোলন ছোট একটি স্বেচ্ছাসেবী দল থেকে ধীরে ধীরে বড় ও বৈচিত্রময় একটি সম্প্রদায়ে পরিণত হয়েছে যা এখন অবদানকারী, ডেভেলপার, স্থানীয় শাখা, পাঠক, দাতা ও আরও অন্যান্য সহযোগীদের নিয়ে সংঘবদ্ধভাবে পরিচালিত হচ্ছে। আগামী ১৫ বছরে আমরা নিজেদেরকে কোথায় দেখতে চাই? এটি মানবতার কল্যাণে ভবিষ্যতে কি করতে পারে? মোটের উপর, ২০৩০ সালে আমরা আমাদেরকে কোথায় দেখতে চাই? আমাদের কি কি প্রয়োজন বা কোন কাজগুলো করা প্রয়োজন; এগুলো নিয়ে কয়েকদিনের মধ্যেই সারা পৃথিবীর সব উইকিমিডিয়া সম্প্রদায়গুলো তাদের নিজেদের সুবিধামত তাদের নিজেদের ভাষায় আলোচনা করবে। এ সব আলোচনার কেন্দ্রীয় স্থান হবে [[:m:Strategy/Wikimedia movement/2017|মেটা উইকি]]।
আমাদের বাংলা সম্প্রদায় থেকেও এই আলোচনায় অংশ নেব নিজেদের সুবিধাজনক স্থানে (এই আলোচনাসভা, উইকিসংকলনের আলোচনাসভা বা ফেইসবুক চ্যাট গ্রুপ অথবা হ্যাং আউটের মাধ্যমে)। আমার কাজ হলো, ছোট ও ক্রমবর্ধমান সম্প্রদায় হিসেবে আমাদের কথাগুলোও যাতে ফাউন্ডেশনের সেই ভবিষ্যৎ পরিকল্পনায় স্থান করে নেয় সেটি নিশ্চিত করা।
এ ব্যাপারে বিস্তারিত সামনের দিনগুলোতে জানানো হবে। তবে, এখন কেউ ইচ্ছে করলে বাংলা পাঠকদের জন্য [[:m:Strategy/Wikimedia movement/2017|এই মেটা পাতাসহ]] অন্যান্য উপপাতাগুলো অনুবাদে সাহায্য করতে পারেন। আপনাদের এই প্রক্রিয়া সম্পর্কে বা অন্য যেকোন প্রকার সাহায্যে আমাকে প্রশ্ন করতে পারেন আমার আলাপ পাতায় অথবা ইমেইলের মাধ্যমে। ধন্যবাদ। – [[User:NahidSultan (WMF)|NahidSultan (WMF)]] ([[User Talk:NahidSultan (WMF)|আলাপ]]) ১৮:১৩, ৯ মার্চ ২০১৭ (ইউটিসি)
== Overview #2 of updates on Wikimedia movement strategy process ==
<div lang="en" dir="ltr" class="mw-content-ltr">
''Note: Apologies for cross-posting and sending in English. [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/Overview 2 of updates on Wikimedia movement strategy process|This message is available for translation on Meta-Wiki]].''
As we mentioned last month, the Wikimedia movement is beginning a movement-wide strategy discussion, a process which will run throughout 2017. This movement strategy discussion will focus on the future of our movement: where we want to go together, and what we want to achieve.
Regular updates are being sent to the [[mail:Wikimedia-l|Wikimedia-l mailing list]], and posted [[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017/Updates|on Meta-Wiki]]. Each month, we are sending overviews of these updates to this page as well. [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/Signup|Sign up]] to receive future announcements and monthly highlights of strategy updates on your user talk page.
Here is an overview of the updates that have been sent since our message last month:
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/16 February 2017 - Update 7 on Wikimedia movement strategy process|Update 7 on Wikimedia movement strategy process]] (16 February 2017)
** Development of documentation for Tracks A & B
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/24 February 2017 - Update 8 on Wikimedia movement strategy process|Update 8 on Wikimedia movement strategy process]] (24 February 2017)
** Introduction of Track Leads for all four audience tracks
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/2 March 2017 - Update 9 on Wikimedia movement strategy process|Update 9 on Wikimedia movement strategy process]] (2 March 2017)
** Seeking feedback on documents being used to help facilitate upcoming community discussions
More information about the movement strategy is available on the [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017|Meta-Wiki 2017 Wikimedia movement strategy portal]].
''Posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] on behalf of the [[m:Special:MyLanguage/Wikimedia Foundation|Wikimedia Foundation]], ১৯:৪৪, ৯ মার্চ ২০১৭ (ইউটিসি) • [[m:Strategy/Wikimedia movement/2017/Updates/Overview 2 of updates on Wikimedia movement strategy process|{{int:please-translate}}]] • [[m:Talk:Strategy/Wikimedia movement/2017/Updates|Get help]]''
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=16350625-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== We invite you to join the movement strategy conversation (now through April 15) ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
: ''এই বার্তা, "[[mailarchive:wikimediaannounce-l/2017-March/001383.html|আমরা আপনাকে আন্দোলন কৌশল আলোচনায় অংশ নেওয়ার অনুরোধ করছি (এখন থেকে ১৫ এপ্রিল পর্যন্ত)]]", [[m:User:GVarnum-WMF|গ্রেগরি ভারনোম]] কর্তৃক ২০১৭ সালের ১৫ ও ১৬ই মার্চ বিভিন্ন আলোচনাসভা, অ্যাফিলিয়েট আলাপ পাতা, আন্দোলনের মেইলিং লিস্ট ও গণবার্তা ব্যবহার করে পাঠানো হয়েছিল। একই ধরণের বার্তা [[m:User:Nicole_Ebber_(WMDE)|নিকোল ইবর]] কর্তৃক ২০১৭ সালের ১৫ই মার্চ সংগঠিত দলসমূহের বিভিন্ন মেইলিং লিস্টে পাঠানো হয়েছে। বার্তার এই সংস্করণটি নথিভূক্ত ও অনুবাদ করার কাজে ব্যবহার করা যাবে''
সুপ্রিয় উইকিমিডিয়ান/উইকিপিডিয়ান:
আজকে আমরা বিশ্বে উইকিমিডিয়ার ভবিষ্যৎ ভূমিকা ঠিক করার জন্য এবং সেই ভূমিকাটি বাস্তবায়ন করার জন্য একসাথে সম্প্রদায়ব্যাপী আলোচনা শুরু করেছি। আপনাকে আলোচনায় অংশ নেওয়ার জন্য সাদরে আমন্ত্রণ জানাচ্ছি।
আলোচনায় অংশ নেওয়ার বিভিন্ন পন্থা রয়েছে, আপনি ইতিমধ্যে শুরু হওয়া কোন আলোচনায় অংশ নিতে পারেন অথবা নিজেই একটি আলোচনা শুরু করতে পারেন।
[[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017/Track_A|ট্র্যাক এ (শুধুমাত্র সংগঠিত দলের জন্য)]]: আপনার অ্যাফিলিয়েট, অন্য কোন কমিটি বা সংগঠিত দলের সাথে আলোচনা (এগুলো হল সংগঠিত দল যারা আন্দোলন পরিচালনা করছে)।
ট্র্যাক বি (স্বতন্ত্র অবদানকারী): [[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017/Cycle_1|মেটাতে]] বা আপনার [[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017/Participate|স্থানীয় ভাষায় বা প্রকল্পে]]।
তিনটি ধাপের আলোচনার এটি প্রথম ধাপ এবং আলোচনা চলবে ১৫ই এপ্রিল পর্যন্ত। প্রথম ধাপের আলোচনার উদ্দেশ্য হল, উইকিমিডিয়া আন্দোলনের ভবিষ্যৎ সম্পর্কে আলোচনা ও দিকগুলো পর্যালোচনা করা। পরবর্তী ১৫ বছরে আমরা একসাথে কি গড়তে চাই বা কি অর্জন করতে চাই?
আমরা যেহেতু একসাথে আলোচনা শুরু ও প্রক্রিয়াটি তৈরি করেছি তাই আপনাকেও স্বাগতম ও আমরা বৈচিত্র্যময় অংশগ্রহণ আশা করছি।
* [[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017|আন্দোলন কৌশল প্রক্রিয়া সম্পর্কে আরও জানুন]]
* [[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017/Toolkit/Discussion_Coordinator_Role|স্বেচ্ছাসেবী আলোচনা সমন্বয়ক হতে কি করতে হবে দেখুন]]
বিনীত,
নিকোল ইবর (ট্র্যাক এ লিড), জ্যামি এন্সটি (ট্র্যাক বি লিড), এবং [[m:Special:MyLanguage/Strategy/Wikimedia_movement/2017/People|অ্যানগেইজমেন্ট সাপোর্ট দল]]</div></div> ০৫:০৯, ১৮ মার্চ ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Strategy/Wikimedia_movement/2017/Updates/Global_message_delivery&oldid=16453957-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== উইকিমিডিয়া আন্দোলনের কৌশল নির্ধারণী আলোচনা শুরু হয়েছে, আপনার মতামত প্রয়োজন! ==
সুপ্রিয় সবাই,<br> আপনারা ইতিমধ্যেই জানেন যে, ২০৩০ সাল পর্যন্ত আমরা উইকিমিডিয়া ও এর আন্দোলনকে কিভাবে দেখতে চাই সে সম্পর্কিত একটি আলোচনা বিভিন্ন ভাষায় শুরু হওয়ার কথা ছিল। আলোচনাটি শুরু হয়েছে এবং বাংলা সম্প্রদায়ের একজন হিসেবে আপনি উইকিমিডিয়া প্রকল্পকে আগামী ১৫ বছরে কিভাবে দেখতে চান সে সম্পর্কে মতামত দিতে পারেন বাংলা উইকিপিডিয়ার '''[[:w:WP:WM2030|এই আলোচনা পাতায়]]'''। বিশ্বব্যাপী উইকিমিডিয়া সম্প্রদায়গুলো যে প্রশ্নের উত্তর অনুসন্ধান করছেন সেটি হল, <br><center>'''আগামী ১৫ বছরে আমরা নিজেদের কোথায় দেখতে চাই এবং একসাথে কি অর্জন করতে চাই?'''</center>
উপরের প্রশ্নের সম্ভাব্য উত্তর পাওয়া যেতে পারে নিচের প্রশ্নগুলোর মাধ্যমে:
* পরবর্তী ১৫ বছর একসাথে কাজ করতে আমাদের কি ধরণের চালিকা শক্তির প্রয়োজন?
* পরবর্তী ১৫ বছরে নিজেদের মধ্যে যোগাযোগ বৃদ্ধি ও কাজের উৎসাহের জন্য কি প্রয়োজন হতে পারে?
* আমাদের উন্নয়ন পরবর্তী ১৫ বছরে কিভাবে দ্রুততর করা যায়?
* পরবর্তী ১৫ বছরে আমরা কিসের মাধ্যমে সবচেয়ে বেশি পরিচিত হতে চাই?
উইকিমিডিয়া প্রকল্পের ভাষাসমূহের মধ্যে বাংলা একটি অন্যতম গুরুত্বপূর্ণ ভাষা। বাংলা সম্প্রদায়ের একজন হিসেবে আপনার মতামতও তাই উইকিমিডিয়া আন্দোলনে গুরুত্বপূর্ণ। আপনার এ বিষয়ে কোন প্রশ্ন থাকলে আমাকে [[:বিশেষ:ইমেইল_প্রেরণ/NahidSultan_(WMF)|ইমেইল]] করতে পারেন অথবা আমার [[ব্যবহারকারী আলাপ:NahidSultan (WMF)|আলাপ পাতার]] মাধ্যমে করতে পারেন। ধন্যবাদ। – [[User:NahidSultan (WMF)|NahidSultan (WMF)]] ([[User Talk:NahidSultan (WMF)|আলাপ]]) ১৫:৫৫, ১৮ মার্চ ২০১৭ (ইউটিসি)
== [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Board of Trustees elections|Start of the 2017 Wikimedia Foundation Board of Trustees elections]] ==
<div lang="en" dir="ltr" class="mw-content-ltr">
''Please accept our apologies for cross-posting this message. [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Board of Trustees elections|This message is available for translation on Meta-Wiki]].''
[[File:Wikimedia-logo black.svg|right|150px|link=m:Special:MyLanguage/Wikimedia Foundation elections/2017]]
On behalf of the Wikimedia Foundation Elections Committee, I am pleased to announce that self-nominations are being accepted for the [[m:Special:MyLanguage/Wikimedia_Foundation_elections/2017/Board_of_Trustees/Call_for_candidates|2017 Wikimedia Foundation Board of Trustees Elections]].
The [[m:Special:MyLanguage/Wikimedia Foundation Board of Trustees|Board of Trustees]] (Board) is the decision-making body that is ultimately responsible for the long-term sustainability of the Wikimedia Foundation, so we value wide input into its selection. More information about this role can be found [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Board of Trustees|on Meta-Wiki]]. Please read the [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Board of Trustees/Call for candidates|letter from the Board of Trustees calling for candidates]].
'''The [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Board of Trustees/Candidates|candidacy submission phase]] will last from April 7 (00:00 UTC) to April 20 (23:59 UTC).'''
'''We will also be accepting questions to ask the candidates from April 7 to April 20. [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Board of Trustees/Questions|You can submit your questions on Meta-Wiki]].'''
Once the questions submission period has ended on April 20, the Elections Committee will then collate the questions for the candidates to respond to beginning on April 21.
The goal of this process is to fill the '''three community-selected seats''' on the Wikimedia Foundation Board of Trustees. The election results will be used by the Board itself to select its new members.
The full schedule for the Board elections is as follows. All dates are '''inclusive''', that is, from the beginning of the first day (UTC) to the end of the last.
* April 7 (00:00 UTC) – April 20 (23:59 UTC) – '''Board nominations'''
* April 7 – April 20 – '''Board candidates questions submission period'''
* April 21 – April 30 – '''Board candidates answer questions'''
* May 1 – May 14 – '''Board voting period'''
* May 15–19 – '''Board vote checking'''
* May 20 – '''Board result announcement goal'''
In addition to the Board elections, we will also soon be holding elections for the following roles:
* '''Funds Dissemination Committee (FDC)'''
** There are five positions being filled. More information about this election will be available [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee|on Meta-Wiki]].
* '''Funds Dissemination Committee Ombudsperson (Ombuds)'''
** One position is being filled. More information about this election will be available [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee Ombudsperson|on Meta-Wiki]].
Please note that this year the Board of Trustees elections will be held before the FDC and Ombuds elections. Candidates who are not elected to the Board are explicitly permitted and encouraged to submit themselves as candidates to the FDC or Ombuds positions after the results of the Board elections are announced.
More information on this year's elections can be found [[m:Special:MyLanguage/Wikimedia Foundation elections/2017|on Meta-Wiki]]. Any questions related to the election can be posted on the [[m:Talk:Wikimedia Foundation elections/2017|election talk page on Meta-Wiki]], or sent to the election committee's mailing list, <tt dir="ltr" style="white-space:nowrap;font-size:12px;line-height:1.5">board-elections[[File:At sign.svg|15x15px|middle|link=|alt=(at)]]wikimedia.org</tt>.
On behalf of the Election Committee,<br />
[[m:User:KTC|Katie Chan]], Chair, [[m:Special:MyLanguage/Wikimedia Foundation elections committee|Wikimedia Foundation Elections Committee]]<br />
[[m:User:JSutherland (WMF)|Joe Sutherland]], Community Advocate, Wikimedia Foundation
''Posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|MediaWiki message delivery]] on behalf of the [[m:Special:MyLanguage/Wikimedia Foundation elections committee|Wikimedia Foundation Elections Committee]], ০৩:৩৬, ৭ এপ্রিল ২০১৭ (ইউটিসি) • [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Board of Trustees elections|{{int:please-translate}}]] • [[m:Talk:Wikimedia Foundation elections/2017|Get help]]''</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=16441214-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== Read-only mode for 20 to 30 minutes on 19 April and 3 May ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
[[:m:Special:MyLanguage/Tech/Server switch 2017|অন্য আরেকটি ভাষায় এটি পড়ুন]] • {{int:please-translate}}
[[foundation:|উইকিমিডিয়া ফাউন্ডেশন]] ডালাসে অবস্থিত তার গৌন উপাত্ত কেন্দ্রটি পরীক্ষা করবে। এটি উইকিপিডিয়া এবং উইকিমিডিয়ার অন্যান্য উইকিসমূহকে একটি দুর্যোগের পরেও অনলাইনে থাকা নিশ্চিত করবে। সবকিছু কাজ করছে তা নিশ্চিত করতে, উইকিমিডিয়ার প্রযুক্তি বিভাগ একটি পরিকল্পিত পরীক্ষা পরিচালনা করবে। এই পরীক্ষাটি প্রদর্শন করবে যে তাঁদের নির্ভরযোগ্যভাবে একটি উপাত্ত কেন্দ্র থেকে অন্য উপাত্ত কেন্দ্রে পরিবর্তন করা যাবে কিনা। এটির জন্য অনেক দলকে পরীক্ষার জন্য প্রস্তুত করা প্রয়োজন এবং অপ্রত্যাশিত সমস্যা সমাধানের জন্য প্রস্তুত থাকা প্রয়োজন।
তারা নতুন উপাত্ত কেন্দ্রে '''১৯ এপ্রিল ২০১৭, বুধবারে''' সকল ট্রাফিক নিয়ে যাবে।
'''৩ মে ২০১৭, বুধবারে''', তাঁরা আবার প্রাথমিক উপাত্ত কেন্দ্রে ফিরে আসবে।
দুর্ভাগ্যবশত, [[mw:Manual:What is MediaWiki?|মিডিয়াউইকির]] কিছু সীমাবদ্ধতার কারণে, ঐ দুটি পরিবর্তনের সময় সব সম্পাদনা অবশ্যই বন্ধ রাখতে হবে। এই ব্যাঘাত ঘটানোর জন্য আমরা ক্ষমাপ্রার্থী, এবং আমরা ভবিষ্যতে এটিকে হ্রাস করার জন্য কাজ করছি।
'''সব উইকিতে অল্প সময়ের জন্য, আপনি পড়তে পারবেন, কিন্তু সম্পাদনা করতে পারবেন না।'''
*আপনি ১৯ এপ্রিল, বুধবার ও ৩ মে, বুধবারে প্রায় ২০ থেকে ৩০ মিনিট সম্পাদনা করতে পারবেন না। পরীক্ষাটি শুরু হবে বাংলাদেশ সময় [https://www.timeanddate.com/worldclock/fixedtime.html?iso=20170419T14 রাত ৮টায়] (১৪:০০ ইউটিসি)।
*এই সময়ে আপনি যদি সম্পাদনা বা সংরক্ষণ করার চেষ্টা করেন, তাহলে আপনি একটি ত্রুটির বার্তা দেখতে পাবেন। আমরা আশা করি যে কোন সম্পাদনা এই সময়ের মধ্যে নষ্ট হবে না, কিন্তু আমরা তার নিশ্চয়তা দিতে পারছি না। আপনি যদি ত্রুটির বার্তা দেখতে পান, তাহলে অনুগ্রহ করে অপেক্ষা করুন যতক্ষণ না সবকিছু স্বাভাবিক অবস্থায় ফিরে আসছে। এরপর আপনি আপনার সম্পাদনা সংরক্ষণ করতে সক্ষম হবেন। কিন্তু, আমরা আপনাকে প্রথমে আপনার পরিবর্তনের একটি অনুলিপি করে রাখতে সুপারিশ করছি।
''অন্যান্য প্রভাব'':
*পটভূমির কাজ ধীর হবে এবং কিছু নাও কাজ করতে পারে। লাল লিঙ্ক স্বাভাবিকের মত দ্রুত হালনাগাদ নাও হতে পারে। আপনি যদি একটি নিবন্ধ তৈরি করেন যা ইতিমধ্যে অন্য কোথাও সংযুক্ত আছে, সেক্ষেত্রে লিংক স্বাভাবিকের চেয়ে বেশি সময় ধরে লাল থাকবে। কিছু দীর্ঘ চলমান স্ক্রিপ্ট বন্ধ করতে হবে।
*১৭ এপ্রিল ২০১৭ ও ১ মে ২০১৭ এই দুই সপ্তাহের সময়কালীন একটি কোড বাধাদান থাকবে। কোন অ-অপরিহার্য কোড স্থাপন সঞ্চালিত হবে না।
যদি প্রয়োজন হয় তাহলে এই প্রকল্পটি স্থগিত করা হতে পারে। আপনি [[wikitech:Switch Datacenter#Schedule for 2017 switch|wikitech.wikimedia.org তে সময়সূচী পড়তে পারেন]]। যেকোনো পরিবর্তন সময়সূচীতে ঘোষণা করা হবে। এই সম্পর্কে আরো বিজ্ঞপ্তি দেয়া হবে। '''দয়া করে আপনার সম্প্রদায়কে এই তথ্যটি জানান।''' /<span dir=ltr>[[m:User:Whatamidoing (WMF)|User:Whatamidoing (WMF)]] ([[m:User talk:Whatamidoing (WMF)|talk]])</span>
</div></div>[[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ১৭:৩৪, ১১ এপ্রিল ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=16545942-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Cognate & automatic interlanguage links ==
Hello all,
''(I'm sorry to write in English, feel free to translate in your own language below)''
From '''April 24th''', a new interlanguage link system will be deployed on all Wiktionaries. This extension, '''[[mw:Special:MyLanguage/Extension:Cognate|Cognate]]''', automatically links the pages with the same title between the Wiktionaries. This means they no longer have to be added in the pages of the main namespace.
This new feature has been developed by Wikimedia Deutschland as the first step of the project [[d:Special:MyLanguage/Wikidata:Wiktionary|Wikidata for Wiktionary]], but does not rely on Wikidata.
To allow the feature to operate, all the former interlanguage links have to be removed from the wikitext. You can do this by using a bot, as it was done on Wikipedia in the past. If you leave them in they will overwrite the automatic links.
During the development we had a lot of discussions with Wiktionary editors to understand their needs, but it's possible that some automatic links don't work as you would expect. If you find some bugs or have suggestions for improvements, feel free to add a sub-task on [[phab:T987|Phabricator]] or add a message on [[mw:Extension talk:Cognate|this talk page]].
Thanks, [[user:Lea Lacroix (WMDE)|Lea Lacroix (WMDE)]] ([[user talk:Lea Lacroix (WMDE)|talk]])
<!-- https://meta.wikimedia.org/w/index.php?title=User:Lea_Lacroix_(WMDE)/List_Cognate_english&oldid=16585986-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== [https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/341?setlang={{CONTENTLANG}} Voting has begun in 2017 Wikimedia Foundation Board of Trustees elections] ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr">[[File:Wikimedia-logo black.svg|{{#switch:{{CONTENTLANG}}|ar=left|he=left|right}}|125px|link=m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Board voting has begun]]''এই বার্তাটি [[m:Special:MyLanguage/Wikimedia Foundation elections committee|উইকিমিডিয়া ফাউন্ডেশন নির্বাচন কমিশন]] কর্তৃক প্রেরিত ও [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Board voting has begun|অনুবাদের]] জন্য উন্মুক্ত।''
''[[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Board of Trustees|উইকিমিডিয়া ফাউন্ডেশন ট্রাস্টি বোর্ড]] নির্বাচনে [[m:Wikimedia Foundation elections/2017#Requirements|যোগ্য ভোটারদের]] [https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/341?setlang={{CONTENTLANG}}&uselang={{CONTENTLANG}} ভোট গ্রহণ] শুরু হয়েছে।
[[m:Wikimedia Foundation Board of Trustees|উইকিমিডিয়া ফাউন্ডেশন বোর্ড অব ট্রাস্টি]] হল যুক্তরাষ্ট্রের উইকিমিডিয়া ফাউন্ডেশনের (৫০১(সি)(৩) স্বেচ্ছাসেবী সংস্থা) সর্বোচ্চ নীতিনির্ধারণী দল। উইকিমিডিয়া ফাউন্ডেশন উইকিপিডিয়া ও কমন্সসহ আরও অনেক বৈচিত্র্যময় প্রকল্প পরিচালনা করে থাকে।
ভোটিং ১লা মে ০০:০০ ইউটিসি থেকে শুরু হয়ে ১৪ই মে ২৩:৫৯ ইউটিসি পর্যন্ত চলবে। ভোট দেওয়ার জন্য '''[https://meta.wikimedia.org/wiki/Special:SecurePoll/vote/341?setlang={{CONTENTLANG}}&uselang={{CONTENTLANG}} এখানে] ক্লিক করুন'''। নির্বাচনের প্রবেশদ্বারটি পাওয়া যাবে [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Board of Trustees|২০১৭ বোর্ড অব ট্রাস্টি নির্বাচনের এই মেটা পাতায়]]।
নির্বাচন কমিশনের পক্ষ থেকে,<br/>
[[m:User:KTC|ক্যাটি চেইন]], চেয়ার, [[m:Special:MyLanguage/Wikimedia Foundation elections committee|উইকিমিডিয়া ফাউন্ডেশন নির্বাচন কমিশন]]<br/>
[[m:User:JSutherland (WMF)|জো সাটারল্যান্ড]], সম্প্রদায়ের উকিল, উইকিমিডিয়া ফাউন্ডেশন
বার্তাটি [[m:Special:MyLanguage/User:MediaWiki message delivery|মিডিয়াউইকি বার্তাবাহক কর্তৃক]] পোস্ট করা হয়েছে • [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Board voting has begun|অনুবাদ]] • [[m:Talk:Wikimedia Foundation elections/2017|সাহায্য]]</div> ১৯:১৪, ৩ মে ২০১৭ (ইউটিসি)''
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=16683836-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== Beta Feature Two Column Edit Conflict View ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
From May 9, the [[mw:Special:MyLanguage/Help:Two_Column_Edit_Conflict_View|Two Column Edit Conflict View]] will be available as a [[mw:Special:MyLanguage/Beta Features|beta feature]] on all wikis. The Two Column Edit Conflict View is a new interface for the edit conflict resolution page. It highlights differences between the editor's and the conflicting changes to make it easy to copy and paste pieces of the text and resolve the conflict. The feature fulfils a request for a more user-friendly edit conflict resolution from the [[m:WMDE Technical Wishes|German Community’s Technical Wishlist]]. Everyone is invited to test the feature and we hope that it will serve you well! </div> [[m:user: Birgit Müller (WMDE)|Birgit Müller (WMDE)]] ১৪:২৮, ৮ মে ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=WMDE_Technical_Wishes/Technical_Wishes_News_list_1&oldid=16712210-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Birgit Müller (WMDE)@metawiki পাঠিয়েছেন -->
== RevisionSlider ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
[[mw:Special:MyLanguage/Extension:RevisionSlider|RevisionSlider]] will be available as a default feature for all users on all wikis from May 17. The RevisionSlider adds a slider view to the diff page so that you can easily move between revisions. The slider view is collapsed by default, and will load by clicking on it. It can also be turned off entirely in the user preferences. RevisionSlider has been a default feature on German, Arabic and Hebrew Wikipedia for 6 months and a beta feature on all wikis for 8 months. The feature fulfills a wish from the [[m:WMDE Technical Wishes|German Community’s Technical Wishlist]]. Thanks to everyone who tested RevisionSlider and gave valuable feedback to improve the feature! We hope that RevisionSlider will continue to serve you well in your work. </div> [[m:user:Birgit Müller (WMDE)|Birgit Müller (WMDE)]] ১৪:৩৯, ১৬ মে ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=WMDE_Technical_Wishes/Technical_Wishes_News_list_1&oldid=16763498-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Birgit Müller (WMDE)@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2|Join the next cycle of Wikimedia movement strategy discussions (underway until June 12)]] ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr">
:''[[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/Cycle 2 discussions launch|মেটা উইকিতে এই বার্তাটি অনুবাদ করা যাবে]]''
[[File:Wikimedia-logo.svg|{{#switch:{{CONTENTLANG}}|ar=left|he=left|right}}|150px]]
উইকিমিডিয়া ফাউন্ডেশনের মূল ও কার্যকরী কৌশলী দল প্রথম পর্বের আলোচনা থেকে প্রাপ্ত ১৮০০-এর বেশি থিমেটিক মন্তব্য পর্যালোচনা করে সেগুলোর প্রতিবেদন প্রকাশ করেছে। তারা এই আলোচনাগুলো থেকে [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2|৫টি থিম নির্ধারণ করেছেন]] যেগুলো প্রায় সব সম্প্রদায়ের আলোচনাতেই উঠে এসেছে। এগুলো সর্বশেষ কৌশলী থিম মন্তব্য নয় তবে আলোচনা করার জন্য প্রধান কিছু ধারণা।
এই ৫টি থিমের উপর চলমান আলোচনায় অনলাইনে বা অফলাইনে [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Participate|বিতর্ক]] করতে আপনাকে স্বাগতম। এই পর্বের আলোচনাটি এখন থেকে জুনের ১২ তারিখ পর্যন্ত চলবে। আপনি এই ৫টির যেকোনটির উপর মতামত দিতে পারেন তবে আমরা আপনাকে আপনার জন্য সবচেয়ে উপযুক্ত থিমটির উপর মন্তব্য করতে অনুরোধ করছি।
এখানে ৫টি থিম মন্তব্য পাবেন, প্রতিটি থিমের জন্য মেটা উইকিতে আরও বিস্তারিত রয়েছে। এছাড়াও আপনি পাবেন কিভাবে প্রতিটি থিমের আলোচনায় অংশ নিতে পারেন।
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2/Healthy, Inclusive Communities|স্বাস্থ্যকর, সর্বব্যাপী সম্প্রদায়]]
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2/The Augmented Age|উদ্দীপিত সময়]]
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2/A Truly Global Movement|একটি সত্যিকারের বৈশ্বিক আন্দোলন]]
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2/The Most Respected Source of Knowledge|জ্ঞানের সর্বাধিক স্বীকৃত উৎস]]
* [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Cycle 2/Engaging in the Knowledge Ecosystem|জ্ঞানের বাস্তুতন্ত্রে জড়িত হওয়া]]
[[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Participate|মেটা উইকির আন্দোলন কৌশল পাতায়]] আপনি এই থিমসমূহ সম্পর্কে বিস্তারিত জানতে পারবেন ও কিভাবে অংশগ্রহণ করবেন সেটিও জানতে পারবেন।
''[[m:Special:MyLanguage/User:MediaWiki message delivery|মিডিয়াউইকি বার্তা প্রদান]] সরঞ্জাম ব্যবহার করে বার্তাটি [[m:Special:MyLanguage/Wikimedia Foundation|উইকিমিডিয়া ফাউন্ডেশন]] কর্তৃক প্রদান করা হয়েছে • [[m:Special:MyLanguage/Strategy/Wikimedia movement/2017/Updates/Cycle 2 discussions launch|{{int:please-translate}}]] • [[m:Talk:Strategy/Wikimedia movement/2017/Updates|সাহায্য]]''</div> ২১:১০, ১৬ মে ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Strategy/Wikimedia_movement/2017/Updates/Global_message_delivery&oldid=16773425-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Funds Dissemination Committee elections|Start of the 2017 Wikimedia Foundation Funds Dissemination Committee elections]] ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr">[[File:Wikimedia-logo black.svg|{{#switch:{{CONTENTLANG}}|ar=left|he=left|right}}|125px|link=m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Funds Dissemination Committee elections]]
:''[[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Funds Dissemination Committee elections|মেটা উইকিতে এই বার্তার অনুবাদ রয়েছে]]।''
উইকিমিডিয়া ফাউন্ডেশন নির্বাচন কমিশনের পক্ষ থেকে আমরা আনন্দের সাথে জানাচ্ছি যে [[m:Wikimedia Foundation elections/2017/Funds Dissemination Committee/Call for candidates|উইকিমিডিয়া ফাউন্ডেশনের ফান্ড ডিসেমিনেশন কমিটি]] ও [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee Ombudsperson|ফান্ড ডিসেমিনেশন অববাডস পার্সনের]] জন্য প্রার্থীতা জমা নেওয়া হচ্ছে। দয়া করে [[m:Wikimedia Foundation elections/2017/Funds Dissemination Committee/Call for candidates|২০১৭ উইকিমিডিয়া ফাউন্ডেশন নির্বাচন প্রবেশদ্বারে]] উইকিমিডিয়া ফাউন্ডেশন কর্তৃক সরবরাহ করা পত্রটি পড়ুন।
''ফান্ড ডিসেমিনেশন কমিটি''<br />
ফান্ড ডিসেমিনেশন কমিটি (এফডিসি) উইকিমিডিয়ার অনুদান সমূহ যোগ্য আবেদনকারীর জন্য বরাদ্দ করার কাজটি করে থাকে। এই নির্বাচনে ৫টি পদে প্রার্থী নেওয়া হবে। আরও জানতে পড়ুন, [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee|এফডিসি নির্বাচন পাতা]]
''ফান্ড ডিসেমিনেশন অমবাডসপার্সন''<br />
ফান্ড ডিসেমিনেশন অমবাডসপার্সন পুরো এফডিসি প্রক্রিয়ার উপর নজর রাখে এবং কোন নির্দিষ্ট অভিযোগ পেলে সেটি বোর্ড অব ট্রাস্টিদের হয়ে পর্যালোচনা করে বার্ষিক প্রতিবেদন প্রস্তুত করে। এই পদে একটি ফাঁকা আসনে প্রার্থী নেওয়া হবে। আরও জানতে দেখুন [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee Ombudsperson|এফডিসি অমবাডসপার্সন নির্বাচন]]।
'''২৮শে মে (২৩:৫৯ ইউটিসি) পর্যন্ত [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee/Candidates|প্রার্থীতা]] জমা দেওয়া যাবে।'''
'''এছাড়াও আমরা ২৮শে মে পর্যন্ত প্রার্থীদের কাছে করা বিভিন্ন প্রশ্ন গ্রহণ করবো। [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Funds Dissemination Committee/Questions|আপনি আপনার প্রশ্ন মেটাতে জমা দিতে পারেন]]।''' ২৮শে মে যখন প্রশ্ন জমাদান শেষ হবে তখন নির্বাচন কমিশন সব প্রশ্নগুলোকে একত্র করবে।
এই নির্বাচনের লক্ষ্য হল উইকিমিডিয়া ফাউন্ডেশনের ফান্ড ডিসেমিনেশন কমিটিতে সম্প্রদায় সদস্যদের ৫টি আসন ও অমবাডসপার্সনের একটি আসন পূর্ণ করা। নির্বাচনের ফলাফল ব্যবহার করে বোর্ড অব ট্রাস্টিগণ কমিটিতে নিয়োগ প্রদান করবেন।
এফডিসি নির্বাচন প্রক্রিয়ার সময়সূচি নিচে দেওয়া হয়েছে। নিচের উল্লেখিত তারিখে শুরু হবে এবং উল্লেখিত তারিখে (ইউটিসি) শেষ হবে।
* মে ১৫ (০০:০০ ইউটিসি) – মে ২৮ (২৩:৫৯ ইউটিসি) – '''প্রার্থীতা জমাদান'''
* মে ১৫ – মে ২৮ – '''প্রার্থীদের কাছে প্রশ্ন করার সময়'''
* মে ২৯ – জুন ২ – '''প্রার্থীরা প্রশ্নের উত্তর দেবেন'''
* জুন ৩ – জুন ১১ – '''ভোটগ্রহণ শুরু'''
* জুন ১২–১৪ – '''ভোট চেক করা'''
* জুন ১৫ – '''নির্বাচনের ফলাফল ঘোষণার সম্ভাব্য তারিখ'''
এ বছরের নির্বাচন সম্পর্কে আরও তথ্য পাওয়া যাবে [[m:Special:MyLanguage/Wikimedia Foundation elections/2017|২০১৭ উইকিমিডিয়া ফাউন্ডেশন নির্বাচন প্রবেশদ্বারে]]
আপনি চাইলে এই নির্বাচনের সংবাদ আপনার প্রকল্পের আলোচনাসভায় আলোচনা করতে পারেন। কোন প্রকার প্রশ্ন বা মন্তব্য মেটার নির্বাচন পাতার আলাপ পাতায় করা যেতে পারে অথবা <tt dir="ltr" style="white-space:nowrap;font-size:12px;line-height:1.5">board-elections[[File:At sign.svg|15x15px|middle|link=|alt=(at)]]wikimedia.org</tt> ঠিকানায় নির্বাচন কমিশনকে মেইল করতে পারেন।
নির্বাচন কমিশনের পক্ষে,<br />
[[m:User:KTC|ক্যাটি চ্যান]], প্রধান, [[m:Special:MyLanguage/Wikimedia Foundation elections committee|উইকিমিডিয়া ফাউন্ডেশন নির্বাচন কমিশন]]<br />
[[m:User:JSutherland (WMF)|জো সাটারল্যান্ড]], কমিউনিটি অ্যাডভোকেট, উইকিমিডিয়া ফাউন্ডেশন
''[[m:Special:MyLanguage/User:MediaWiki message delivery|মিডিয়াউইকি বার্তাবাহক কর্তৃক প্রকাশিত]] • [[m:Special:MyLanguage/Wikimedia Foundation elections/2017/Updates/Start of the 2017 Wikimedia Foundation Funds Dissemination Committee elections|অনুবাদ]] • [[m:Talk:Wikimedia Foundation elections/2017|সাহায্য]]''</div> ২১:০৬, ২৩ মে ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=16804695-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:GVarnum-WMF@metawiki পাঠিয়েছেন -->
== Enable sitelinks on Wikidata for Wiktionary pages ==
<div lang="en" dir="ltr" class="mw-content-ltr">Hello,
I’m sorry to write this message in English. Please help us [[d:Special:MyLanguage/Wikidata:Wiktionary/Sitelinks|translating the full message here]]!
Short version: From June 20th, we are going to store the interwiki links of all the namespaces (except main, user and talk) in Wikidata. This will not break your Wiktionary, but if you want to use all the features, you will have to remove your sitelinks from wikitext and connect your pages to Wikidata.
[[d:Special:MyLanguage/Wikidata:Wiktionary/Sitelinks|Long version available and translatable here.]]
If you have any question or concern, feel free to ping me.
Thanks, [[m:User:Lea Lacroix (WMDE)|Lea Lacroix (WMDE)]] ([[m:User talk:Lea Lacroix (WMDE)|talk]]) ০৮:৩৮, ১ জুন ২০১৭ (ইউটিসি)</div>
<!-- https://meta.wikimedia.org/w/index.php?title=User:Lea_Lacroix_(WMDE)/List_Cognate_english&oldid=16834421-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== Sitelinks are enabled on Wikidata for Wiktionary pages (outside main namespace) ==
Hello,
I’m sorry to write this message in English. Please help us translating the full message [[d:Wikidata:Wiktionary/Sitelinks|here]]!
Short version: Since yesterday, we are able to store the interwiki links of all the Wiktionaries namespaces (except main, citations, user and talk) in Wikidata. This will not break your Wiktionary, but if you want to use all the features, you will have to remove your sitelinks from wikitext and connect your pages to Wikidata.
Important: even if it is technically possible, you '''should not link Wiktionary main namespace pages''' from Wikidata. The interwiki links for them are already provided by [[mw:Extension:Cognate|Cognate]].
[[d:Wikidata:Wiktionary/Sitelinks|Long version available and translatable here.]]
If you encounter any problem or find a bug, feel free to ping me.
Thanks, [[User:MediaWiki message delivery|MediaWiki message delivery]] ([[User talk:MediaWiki message delivery|আলাপ]]) ০৮:২৮, ২১ জুন ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:Lea_Lacroix_(WMDE)/List_Cognate_english&oldid=16834421-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== Enabling Page Previews ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr"><div class="plainlinks">
''' Enabling Page Previews '''
Hello,
Based on the positive results of [[:mw:Beta Features/Hovercards#Success_Metrics_and_Feature_Evaluation|a series of quantitative and qualitative tests]], the [[mw:Reading|Reading web team]] at the Wikimedia Foundation would like to enable the [[mw:Page Previews|Page Previews]] feature for this project.
Page Previews provide a preview of any linked article, giving readers a quick understanding of a related article without leaving the current page. However, we realize the needs of the Wiktionary community will not be satisfied by the current implementation of the feature. We are interested in developing separate previews for Wiktionary that will display the wiktionary definition of an item, as well as an image, if available.
We'd like to invite some discussion around interest in this feature, as well as the requirements and elements that would make it useful for the Wiktionary community.
</div></div> [[User:CKoerner (WMF)|CKoerner (WMF)]] ১৫:০২, ৬ জুলাই ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:CKoerner_(WMF)/Enable_Hovercards_Wiktionary/Distribution_list&oldid=16968644-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:CKoerner (WMF)@metawiki পাঠিয়েছেন -->
== Accessible editing buttons ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">The MediaWiki developers have been slowly improving the accessibility of the user interface. The next step in this transition will change the appearance of some buttons and may break some outdated (non-updated or unmaintained) user scripts and gadgets.
You can see and use the [https://www.mediawiki.org/wiki/Project:Sandbox?action=submit&ooui=0 old] and [https://www.mediawiki.org/wiki/Project:Sandbox?action=submit&ooui=1 new] versions now. Most editors will only notice that some buttons are slightly larger and have different colors.
<gallery mode="nolines" caption="Comparison of old and new styles" heights="240" widths="572">
File:MediaWiki edit page buttons accessibility change 2017, before.png|Buttons before the change
File:MediaWiki edit page buttons accessibility change 2017, after.png|Buttons after the change
</gallery>
However, this change also affects some user scripts and gadgets. Unfortunately, some of them may not work well in the new system. <mark>If you maintain any user scripts or gadgets that are used for editing, please see '''[[:mw:Contributors/Projects/Accessible editing buttons]]''' for information on how to test and fix your scripts. Outdated scripts can be tested and fixed now.</mark>
This change will probably reach this wiki on '''Tuesday, 1 August 2017'''. Please leave a note at [[:mw:Talk:Contributors/Projects/Accessible editing buttons]] if you need help.</div> --[[m:User:Whatamidoing (WMF)|Whatamidoing (WMF)]] ([[m:User talk:Whatamidoing (WMF)|talk]]) ১৬:৫৬, ২৭ জুলাই ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:Whatamidoing_(WMF)/Sandbox&oldid=17043399-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Quiddity (WMF)@metawiki পাঠিয়েছেন -->
== Columns for references ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">''{{Int:Please-translate}}'' • ''[[:m:Special:MyLanguage/Editing/Columns for references|Read this in another language]]''
Hello,
At the request of Wikipedia editors, a new feature has been added to MediaWiki. Long lists of references (also called citations or footnotes) will automatically be displayed in columns. This will make it easier for most people to read the references, especially on narrow screens. Short lists of references will not be affected.
I plan to enable this new feature at this wiki on Monday, 11 September 2017. After that date, use the normal <code><nowiki><references /></nowiki></code> tag on any page with many references to see this feature. If you do not want columns used on that page, then use this wikitext code instead: <code><nowiki><references responsive="0" /></nowiki></code>
If you believe that this new feature is not appropriate for this wiki, or if you need help adjusting templates, then please contact me at [[mw:Contributors/Projects/Columns for references]]. --[[User:Whatamidoing (WMF)]] ([[User talk:Whatamidoing (WMF)|talk]])</div> ১৮:২৩, ১ সেপ্টেম্বর ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery/Wiktionary&oldid=17181867-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Quiddity (WMF)@metawiki পাঠিয়েছেন -->
== মিশ্র ক্রিয়া প্রসঙ্গে ==
সম্প্রতি '[[খুন]]' ভুক্তিতে লিখতে গিয়ে মিশ্র ক্রিয়া নিয়ে ঝামেলায় পড়েছিলাম। এ সম্পর্কে উইকিঅভিধানের নীতিমালা কী হবে জানার জন্য আলোচনাসভায় তুলেছি।
বিশেষ্য/বিশেষণ/ধ্বনাত্মক অব্যয়ের সঙ্গে কর্, হ্, দে, পা, যা, কাট্, গা, ছাড়্, ধর্, মার্ প্রভৃতি ধাতুযোগে গঠিত ক্রিয়াপদ বিশেষ বিশেষ অর্থে মিশ্র ক্রিয়া গঠন করে। যেমন- দর্শন করা, গোল্লায় যাওয়া (বিশেষ্যর পরে); প্রীত হওয়া (বিশেষণের পরে); ঝিমঝিম করা/টনটন করা (ধ্বনাত্মক অব্যয়ের পরে)। [মুনীর চৌধুরী, মোফাজ্জল হায়দার চৌধুরী]
সাধারণ অর্থে ''ভাত খাওয়া, মাছ ধরা, মার দেয়া, খুশি হওয়া, বকবক করা'' - এর সবগুলোই মিশ্র ক্রিয়া এবং অর্থও স্পষ্ট। কিন্তু মিশ্র ক্রিয়ারা আক্ষরিক অর্থের (বাচ্যার্থ/মুখ্যার্থ) পাশাপাশি অনেকসময় বিশেষ অর্থ (লক্ষ্যার্থ/ব্যাঙ্গার্থ) প্রকাশ করে, যেমন- ''হাত দেখা, হাত লাগানো, হাত করা''। তখন এগুলো 'বাগধারা'র মধ্যে ধরা হয়, যার অনেকগুলো সুপরিচিত- ''অক্কা পাওয়া, কপাল ফেরা, টনক নড়া'' ইত্যাদি।
* প্রথমোক্তগুলো (''ভাত খাওয়া'', ...) বলা যায় অসীম সংখ্যক হতে পারে, ভাব প্রকাশ ও খুঁটিনাটির জন্য লেখক/কথক বিশেষ্যের সাথে অসংখ্য রকমের ক্রিয়া ব্যবহার করতে পারেন। ''হাত তোলা, ~ ওঠানো, ~ নামানো, ~ দেয়া, ~ রাখা, ~ কাটা, ~ ছাড়া, ~ ধরা,'' ...)। আক্ষরিকভাবে এগুলোর অর্থ বোঝা বা ব্যাখ্যা করা সম্ভব।
* পরবর্তীগুলো (''হাত দেখা,'' ...) পরিমাণ প্রায় সসীম; অভিধানে কিছু অন্তর্ভুক্ত হয়েছে, গল্প-উপন্যাসে অনেক পাওয়া যায়, হরদম কথাবার্তায় ব্যবহৃত হয়; অল্পকিছু নতুন সৃষ্টিও হয়; আর বাগধারাগুলোতো সাহিত্য ও ব্যাকরণের বইয়েই মোটামুটি সংকলিত আছে।
* এছাড়া কিছু আছে দুরকম অর্থই বহন করছে, যেমন- ''হাত পাতা'' বলতে ভিক্ষা চাওয়া ও সাধারণ অর্থ দুটোই বোঝাতে পারে। আবার ''মনে হওয়া, মনে পড়া, মনে করা'' - এগুলো আসলে লক্ষ্যার্থক হলেও বহুলব্যবহারে এতো পরিচিত হয়ে গেছে যে সেগুলোর আক্ষরিক অর্থ আর খেয়াল হয় না।
:(এসব ক্ষেত্রে [[w:Collocation|কলোকেশন]] বড় ভূমিকা রাখে, যেমন ''চাপাবাজি করা (<del>চাপাবাজি মারা</del>), পদক্ষেপ নেয়া (<del>পদক্ষেপ রাখা</del>)''। উল্লেখ্য, কলোকেশনেরই একটি ধারা হলো গুরুচণ্ডালী, যেমন- ''ভাত খাওয়া (<del>ভাত ভক্ষণ করা</del>)''।)
এখন প্রশ্ন হলো, মিশ্র ক্রিয়াগুলোকে আমরা উইকিঅভিধানে মূল ভুক্তি হিসেবে নেবো কিনা। নিলেও সবগুলো নেবো না শুধু লক্ষ্যার্থকগুলো? দ্ব্যর্থবোধক বা অতিপরিচিতগুলোর ব্যাপারেই বা কী করা হবে?
:হাতের কাছে ''[[w:বাংলা একাডেমি ব্যবহারিক বাংলা অভিধান|বাংলা একাডেমি ব্যবহারিক বাংলা অভিধানে]]'' দেখতে পাচ্ছি বাগধারা অধিকাংশই আছে, বাকি লক্ষ্যার্থকগুলো কোথাও আছে কোথাও নেই, আক্ষরিকগুলো মাঝেমধ্যে চোখে পড়ে; রকম তিনটি যথাক্রমে 'ক্রিয়া (আলঙ্কারিক)', 'ক্রিয়া (ব্যাঙ্গার্থক)' ও 'ক্রিয়া' পদ হিসেবে নির্দেশিত হয়েছে। ''ঘুম'' ভুক্তিতে ''ঘুম দেওয়া, ~ যাওয়া, ~ লাগানো'' আছে, কিন্তু ''ঘুম পাড়া'' নেই, আবার ''ঘুম পাড়ানো'' আছে। ''ঘুম ধরা, ~ পাওয়া, ~ চটে যাওয়া'' এগুলোও আছে। এদিকে সদ্যপ্রকাশিত একাডেমীর ''আধুনিক বাংলা অভিধানে'' আবার কিছু রাখা হয়েছে, কিছু হয়নি। যেমন ঘুম শব্দের কোনো মিশ্র ক্রিয়াই দেখানো হয়নি; অন্যদিকে হাত শব্দে ১৭-১৮টা দেখানো হয়েছে, চিহ্নিত করা হয়েছে 'ক্রিয়াবিশেষ্য' হিসেবে। ''সংসদ অভিধানে''ও অনুরূপ কাজ করেছে। মোটকথা, নির্দিষ্ট কোনো নিয়ম অনুসৃত হয়নি, তবে মিশ্র ক্রিয়াকে আলাদা ভুক্তির মর্যাদা দেয়া হয়েছে।
মিশ্র ক্রিয়া ইংরেজিতে বাংলার মতো অতো বিস্তৃত না হলেও যথেষ্ট পরিমাণেই আছে (have a bath, make a bath, take a bath)।
:অক্সফোর্ড ডিকশনারিতে সম্ভাব্য প্রচলিত সব মিশ্র ক্রিয়াই উদাহরণের মধ্যে উল্লেখ করা হয়েছে এবং লক্ষ্যার্থকগুলোতে প্রথম বন্ধনীর মধ্যে অর্থ দেয়া হয়েছে। যেমন- I will ''have a bath''. You must ''pay attention'' (give concentration). তবে সেগুলোকে পৃথক ভুক্তির মর্যাদা দেয়া হয়নি।
:ইংরেজি উইকিশনারি এক্ষেত্রে শর্টকাট পদ্ধতিতে গেছে। [https://en.wiktionary.org/wiki/Appendix:Collocations_of_do,_have,_make,_and_take এই পাতায়] প্রচুর শব্দের সাথে do, have, make, take এই চারটা মাত্র ক্রিয়াপদের মিশ্র ক্রিয়া রূপটা অর্থসহ দেখানো হয়েছে। আবার তালিকাভুক্ত বেশিরভাগই পৃথক ভুক্তি হিসেবেও আছে।
ব্যক্তিগতভাবে আমি অক্সফোর্ডের শৈলিটা পছন্দ করি। অর্থাৎ উদাহরণ বাক্যেই মিশ্র ক্রিয়ার রূপ দেখিয়ে দেব, প্রয়োজনে বন্ধনীবদ্ধ অর্থসহ। এতে অবশ্য 'হাত'-জাতীয় ভুক্তিতে প্রচুর মিশ্র ক্রিয়ার জন্যে প্রচুর উদাহরণ বাক্য রাখতে হবে, যদিও উইকিশনারির নীতিমালা বেশি উদাহরণ-বাক্যের বিপক্ষে। যাইহোক, সবার মত-অমত পেলে নীতিমালা করা সহজ হবে। - [[User:Rezwan Khair|রেজওয়ান]] ([[User talk:Rezwan Khair|আলাপ]]) ১৯:৪১, ৪ অক্টোবর ২০১৭ (ইউটিসি)
::[[ব্যবহারকারী:Rezwan Khair|Rezwan Khair]], আমার সংক্ষিপ্ত মতামত হল: এগুলি যদি অসীম সংখ্যক হয় তাহলে আমরা আমাদের দৈনন্দিন জীবনে ব্যবহার করি এমন কয়েকশত নিয়ে ভুক্তি তৈরি করুন। বাকী ক্ষেত্রে ইংরেজি অভিধানের মত শর্টকাট অনুসরণ করতে পারেন। --[[ব্যবহারকারী:Aftabuzzaman|Aftabuzzaman]] ([[ব্যবহারকারী আলাপ:Aftabuzzaman|আলাপ]]) ১৮:৫৪, ৫ অক্টোবর ২০১৭ (ইউটিসি)
:::শর্টকাট পদ্ধতি মনে হয় কাজে লাগবে না, কারণ বাংলাতে মিশ্র ক্রিয়ার পরিমাণ অনেক বেশি। আমার মত হলো লক্ষ্যার্থকগুলোর উল্লেখযোগ্যতা নিয়ে যেহেতু সন্দেহ নেই, তাই সেগুলোর পৃথক ভুক্তি হবে; তবে সাধারণ অর্থেরগুলো মূল শব্দের উদাহরণে অন্তর্ভুক্ত থাকবে। আর দ্ব্যর্থবোধকগুলো লক্ষ্যার্থের ভুক্তিতে থাকুক, তবে সাধারণ অর্থ সেখানে উল্লেখ করে দেয়া হবে। আর অতিপরিচিত শব্দগুলোও বিশেষ অর্থ বহন করলে লক্ষ্যার্থকেরই মধ্যে থাকবে। পৃথক ভুক্তিগুলোতে পদনির্দেশ ক্রিয়া হবে এবং বিষয়শ্রেণীতে ক্রিয়া, মিশ্র ক্রিয়া, ক্রিয়া বিশেষ্য তিনটিই উল্লেখ করা হবে। আশা করি এতে কাজ চলবে। - [[ব্যবহারকারী:Rezwan Khair|রেজওয়ান]] ([[ব্যবহারকারী আলাপ:Rezwan Khair|আলাপ]]) ১৮:৩৩, ১৩ অক্টোবর ২০১৭ (ইউটিসি)
== সামাজিক যোগাযোগ ==
সুধী, বাংলা উইকিঅভিধানের সামাজিক যোগাযোগ বাংলা উইকিপিডিয়ার মত সাইটনোটিশ আকারে ও প্রধান পাতায় দিচ্ছি। আপনাদের মতামত জানান। ধন্যবাদ।--'''<span style="text-shadow:7px 7px 8px Black;">[[User:NahidSultan|<font face="Papyrus">যুদ্ধমন্ত্রী</font>]] <sup>[[User talk:NahidSultan#top|<font face="Papyrus">আলাপ</font>]]</sup></span>''' ১৬:০৭, ২৫ অক্টোবর ২০১৭ (ইউটিসি)
== [[m:Special:MyLanguage/Tech/News/2017/45|Tech News: 2017-45]] ==
<section begin="technews-2017-W45"/><div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
উইকিমিডিয়া কারিগরি সম্প্রদায় থেকে প্রকাশিত সাম্প্রতিক '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]'''। অনুগ্রহ করে এই পরিবর্তন সম্পর্কে অন্যান্য ব্যবহারকারীদের জানান। সকল পরিবর্তন আপনাকে প্রভাবিত করবে না। আরও [[m:Special:MyLanguage/Tech/News/2017/45|অনুবাদ]] উপলব্ধ রয়েছে।
'''সাম্প্রতিক পরিবর্তনসমূহ'''
*আপনি আর [[Special:Log|বিশেষ:লগ]]-এ টহলদানের লগ দেখতে পাবেন না যদি না আপনি তা নির্দিষ্ট করে নির্বাচন করেন। [https://gerrit.wikimedia.org/r/384197]
'''এই সপ্তাহের পরিবর্তনসমূহ'''
*[[m:Special:MyLanguage/2017 Community Wishlist Survey|২০১৭ সম্প্রদায়ের ইচ্ছা তালিকার জরিপ]] ৬ নভেম্বরে শুরু হবে। আপনি [http://www.timeanddate.com/worldclock/fixedtime.html?hour=19&min=00&sec=0&day=06&month=11&year=2017 ১৯:০০ ইউটিসি] থেকে ১৯ নভেম্বর পর্যন্ত প্রস্তাবনা জমা দিতে পারবেন।
* <span title="পৌনঃপুনিক উপাদান">[[File:Octicons-sync.svg|12px|link=]]</span> মিডিয়াউইকির [[mw:MediaWiki 1.31/wmf.7|নতুন সংস্করণ]] টেস্ট উইকি এবং MediaWiki.org সাইটে {{#time:j xg|2017-11-07|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে। অ-উইকিপিডিয়া ও কিছু উইকিপিডিয়ায় এটি {{#time:j xg|2017-11-08|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে। সকল উইকিতে এটি {{#time:j xg|2017-11-09|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে ([[mw:MediaWiki 1.31/Roadmap|পঞ্জিকা]])।
'''সভা'''
* <span title="উন্নত উপাদান">[[File:Octicons-tools.svg|15px|link=]]</span> আপনি IRC-তে প্রযুক্তিগত পরামর্শ সভায় যোগ দিতে পারেন। সভায় স্বেচ্ছাসেবক উন্নয়নকারীরা পরামর্শ পেতে প্রশ্ন করতে পারেন। সভাটি [http://www.timeanddate.com/worldclock/fixedtime.html?hour=16&min=00&sec=0&day=08&month=11&year=2017 {{#time:j xg|2017-11-08|{{CURRENTCONTENTLANGUAGE}}}} তারিখে ১৬:০০ টায় (ইউটিসি)] অনুষ্ঠিত হবে। জানুন [[mw:Technical Advice IRC Meeting|কিভাবে যোগ দিতে হয়]]।
'''ভবিষ্যতের পরিবর্তনসমূহ'''
*উইকিমিডিয়ার উইকিসমূহে অনুচ্ছেদের সাথে যুক্ত অ-লাতিন লিপির [[:w:bn:ইউআরএল|ইউআরএলগুলি]] <code>https://ru.wikipedia.org/wiki/Википедия#История</code>-এর পরিবর্তে <code>https://ru.wikipedia.org/wiki/Википедия#.D0.98.D1.81.D1.82.D0.BE.D1.80.D0.B8.D1.8F</code> দেখায়। এটি শীঘ্রই সংশোধন করা হবে। পুরানো লিঙ্কগুলিও কাজ করবে। [https://phabricator.wikimedia.org/T152540][https://phabricator.wikimedia.org/T75092]
''[[m:Special:MyLanguage/Tech/News/Writers|প্রযুক্তি সংবাদ লেখক]] দ্বারা প্রস্তুতকৃত ও [[m:Special:MyLanguage/User:MediaWiki message delivery|বট]] দ্বারা প্রকাশিত '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]''' • [[m:Special:MyLanguage/Tech/News#contribute|অবদান রাখুন]] • [[m:Special:MyLanguage/Tech/News/2017/45|অনুবাদ করুন]] • [[m:Tech|সাহায্য পান]] • [[m:Talk:Tech/News|মতামত জানান]] • [[m:Global message delivery/Targets/Tech ambassadors|গ্রাহকত্ব পরিচালনা করুন]]।''
</div></div> <section end="technews-2017-W45"/> ১৮:৪৫, ৬ নভেম্বর ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=17381742-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Changes to the global ban policy ==
<div lang="en" dir="ltr" class="mw-content-ltr">Hello. Some changes to the [[m:Global bans|community global ban policy]] have been proposed. Your comments are welcome at [[:m:Requests for comment/Improvement of global ban policy]]. Please translate this message to your language, if needed. Cordially. [[:m:User:Matiia|Matiia]] ([[:m:User talk:Matiia|Matiia]]) ০০:৩৪, ১২ নভেম্বর ২০১৭ (ইউটিসি)</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=17241561-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Matiia@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Tech/News/2017/46|Tech News: 2017-46]] ==
<section begin="technews-2017-W46"/><div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
উইকিমিডিয়া কারিগরি সম্প্রদায় থেকে প্রকাশিত সাম্প্রতিক '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]'''। অনুগ্রহ করে এই পরিবর্তন সম্পর্কে অন্যান্য ব্যবহারকারীদের জানান। সকল পরিবর্তন আপনাকে প্রভাবিত করবে না। আরও [[m:Special:MyLanguage/Tech/News/2017/46|অনুবাদ]] উপলব্ধ রয়েছে।
'''এই সপ্তাহের পরিবর্তনসমূহ'''
*আপনি পাতার দুটি ভিন্ন সংস্করণ তুলনা করার জন্য যে পার্থক্য দেখেন তা [[mw:MediaWiki|MediaWiki.org]] ও [[testwiki:Main Page|টেস্ট উইকিতে]] পরিবর্তিত হয়েছে। এটি স্থানান্তরিত অনুচ্ছেদে লেখা সহজে খুঁজে পেতে করা হয়েছে। আশা করা হচ্ছে শীঘ্রই এটি আরো উইকিতে আসবে। আপনি [[phab:T146781|ফ্যাব্রিকেটরে সমস্যার প্রতিবেদন]] জানাতে পারেন। [https://lists.wikimedia.org/pipermail/wikitech-l/2017-November/089099.html]
*কমন্সের একটি নতুন ব্যবহারকারী দল [[w:bn:এমপি৩|এমপি৩]] ফাইল আপলোড করতে সক্ষম হবেন। ১৭ নভেম্বর থেকে এই ব্যবহারকারী দল সৃষ্টি করার পরিকল্পনা করা হয়েছে। [https://phabricator.wikimedia.org/T180002]
* Flagged Revisions ব্যবহার করা উইকিগুলি সাম্প্রতিক পরিবর্তনের পাতায় পূর্বনির্ধারিতভাবে [[:mw:Special:MyLanguage/Edit Review Improvements/New filters for edit review|সম্পাদনা পর্যালোচনার জন্য নতুন ছাঁকনি]] পাবে। ব্যবহারকারীগণ তাঁদের পছন্দসমূহ থেকে এটি নিষ্ক্রিয় করতে পারবেন। [https://phabricator.wikimedia.org/T178990]
* <span title="পৌনঃপুনিক উপাদান">[[File:Octicons-sync.svg|12px|link=]]</span> মিডিয়াউইকির [[mw:MediaWiki 1.31/wmf.8|নতুন সংস্করণ]] টেস্ট উইকি এবং MediaWiki.org সাইটে {{#time:j xg|2017-11-14|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে। অ-উইকিপিডিয়া ও কিছু উইকিপিডিয়ায় এটি {{#time:j xg|2017-11-15|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে। সকল উইকিতে এটি {{#time:j xg|2017-11-16|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে ([[mw:MediaWiki 1.31/Roadmap|পঞ্জিকা]])।
'''সভা'''
* <span title="উন্নত উপাদান">[[File:Octicons-tools.svg|15px|link=]]</span> আপনি IRC-তে প্রযুক্তিগত পরামর্শ সভায় যোগ দিতে পারেন। সভায় স্বেচ্ছাসেবক উন্নয়নকারীরা পরামর্শ পেতে প্রশ্ন করতে পারেন। সভাটি [http://www.timeanddate.com/worldclock/fixedtime.html?hour=16&min=00&sec=0&day=15&month=11&year=2017 {{#time:j xg|2017-11-15|{{CURRENTCONTENTLANGUAGE}}}} তারিখে ১৬:০০ টায় (ইউটিসি)] অনুষ্ঠিত হবে। জানুন [[mw:Technical Advice IRC Meeting|কিভাবে যোগ দিতে হয়]]।
'''ভবিষ্যতের পরিবর্তনসমূহ'''
*উইকিমিডিয়া কমন্সে আপলোড করার এবং দেখার জন্য ত্রিমাত্রিক মডেলের সমর্থন শীঘ্রই আসছে। এই বৈশিষ্ট্যটি [[w:en:STL (file format)|.STL ফাইল বিন্যাস]] সমর্থন করবে। আপনি [[testwiki:File:Programmatically created crystal.stl|টেস্ট উইকিতে একটি উদাহরণ]] দেখতে পারেন। [https://www.mediawiki.org/wiki/Help:Extension:3d]
''[[m:Special:MyLanguage/Tech/News/Writers|প্রযুক্তি সংবাদ লেখক]] দ্বারা প্রস্তুতকৃত ও [[m:Special:MyLanguage/User:MediaWiki message delivery|বট]] দ্বারা প্রকাশিত '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]''' • [[m:Special:MyLanguage/Tech/News#contribute|অবদান রাখুন]] • [[m:Special:MyLanguage/Tech/News/2017/46|অনুবাদ করুন]] • [[m:Tech|সাহায্য পান]] • [[m:Talk:Tech/News|মতামত জানান]] • [[m:Global message delivery/Targets/Tech ambassadors|গ্রাহকত্ব পরিচালনা করুন]]।''
</div></div> <section end="technews-2017-W46"/> ১৯:১৯, ১৩ নভেম্বর ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=17417996-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Tech/News/2017/47|Tech News: 2017-47]] ==
<section begin="technews-2017-W47"/><div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
উইকিমিডিয়া কারিগরি সম্প্রদায় থেকে প্রকাশিত সাম্প্রতিক '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]'''। অনুগ্রহ করে এই পরিবর্তন সম্পর্কে অন্যান্য ব্যবহারকারীদের জানান। সকল পরিবর্তন আপনাকে প্রভাবিত করবে না। আরও [[m:Special:MyLanguage/Tech/News/2017/47|অনুবাদ]] উপলব্ধ রয়েছে।
'''সাম্প্রতিক পরিবর্তনসমূহ'''
* আপনি যদি [[w:bn:এনড্রয়েড (অপারেটিং সিস্টেম)|অ্যান্ড্রয়েডে]] ক্রোম ওয়েব ব্রাউজার ব্যবহার করেন, আপনি মোবাইল ওয়েবসাইটে একটি ডাউনলোড আইকন দেখতে পাবেন। তাতে ক্লিক করে আপনি একটি বিন্যাসিত পিডিএফ ডাউনলোড করতে পারবেন। এটি ভবিষ্যতে অন্য মোবাইল ব্রাউজারেও কাজ করবে। [https://www.mediawiki.org/wiki/Reading/Web/Projects/Mobile_PDFs]
* <span title="উন্নত উপাদান">[[File:Octicons-tools.svg|15px|link=]]</span> অপব্যবহার ছাঁকনির এখন <code>get_matches</code> নামক ফাংশন আছে। আপনি রেগুলার এক্সপ্রেশন থেকে মিলগুলি সংরক্ষণ করতে এটি ব্যবহার করতে পারেন - প্রতিটি ক্যাপচার গ্রুপে একটি করে। আপনি [[phabricator:T179957|ফ্যাব্রিকেটরে]] আরো পড়তে পারেন।
'''সমস্যাসমূহ'''
* একটি ডেটাবেস ক্র্যাশের কারণে গত সপ্তাহের মিডিয়াউইকির সংস্করণ সব উইকিপিডিয়াতে আসেনি। ২০ নভেম্বরে এটি সবকটি উইকিতে আসবে। [https://phabricator.wikimedia.org/T180714][https://phabricator.wikimedia.org/T178635][https://wikitech.wikimedia.org/wiki/Incident_documentation/20171116-s5-dewiki-wikidata]
'''এই সপ্তাহের পরিবর্তনসমূহ'''
* এই সপ্তাহে কোন নতুন মিডিয়াউইকি সংস্করণ প্রকাশিত হবে না।
'''সভা'''
* <span title="পৌনঃপুনিক উপাদান">[[File:Octicons-sync.svg|12px|link=]]</span> আপনি সম্পাদক দলের সঙ্গে পরবর্তী সভায় যোগ দিতে পারেন। সভাকালীন, আপনি যে সমস্যাটি সবচেয়ে গুরুত্বপূর্ণ বলে মনে করেন তা উন্নয়নকারীদেরকে জানাতে পারেন। সভাটি [https://www.timeanddate.com/worldclock/fixedtime.html?hour=19&min=30&sec=0&year=2017&month=11&day=21 {{#time:j xg|2017-11-21|{{CURRENTCONTENTLANGUAGE}}}} তারিখে ১৯:৩০ টায় (ইউটিসি)] অনুষ্ঠিত হবে। জানুন [[mw:Editing team/Weekly triage meetings|কিভাবে যোগ দিতে হয়]]।
* <span title="উন্নত উপাদান">[[File:Octicons-tools.svg|15px|link=]]</span> আপনি IRC-তে প্রযুক্তিগত পরামর্শ সভায় যোগ দিতে পারেন। সভায় স্বেচ্ছাসেবক উন্নয়নকারীরা পরামর্শ পেতে প্রশ্ন করতে পারেন। সভাটি [http://www.timeanddate.com/worldclock/fixedtime.html?hour=16&min=00&sec=0&day=22&month=11&year=2017 {{#time:j xg|2017-11-22|{{CURRENTCONTENTLANGUAGE}}}} তারিখে ১৬:০০ টায় (ইউটিসি)] অনুষ্ঠিত হবে। জানুন [[mw:Technical Advice IRC Meeting|কিভাবে যোগ দিতে হয়]]।
'''ভবিষ্যতের পরিবর্তনসমূহ'''
* ভাষা রূপান্তরকারী সিনট্যাক্স আর বাহ্যিক সংযোগগুলির ভিতরে কাজ করবে না। <code><nowiki>http://-{zh-cn:foo.com; zh-hk:bar.com; zh-tw:baz.com}-</nowiki></code> এর মত উইকিপাঠ্য অবশ্যই প্রতিস্থাপন করতে হবে। এর পরিবর্তে আপনাকে <code><nowiki>-{zh-cn: http://foo.com ; zh-hk: http://bar.com ; zh-tw:http://baz.com }-</nowiki></code> লিখতে হবে। এটি শুধুমাত্র ভাষা রূপান্তরকারী সক্ষম করা ভাষাগুলিকে প্রভাবিত করবে। এই ধরনের ভাষাগুলির উদাহরণ হল চীনা এবং সার্বীয়। এটি পরের সপ্তাহে ঘটবে। [https://phabricator.wikimedia.org/T119158][https://lists.wikimedia.org/pipermail/wikitech-ambassadors/2017-November/001730.html]
''[[m:Special:MyLanguage/Tech/News/Writers|প্রযুক্তি সংবাদ লেখক]] দ্বারা প্রস্তুতকৃত ও [[m:Special:MyLanguage/User:MediaWiki message delivery|বট]] দ্বারা প্রকাশিত '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]''' • [[m:Special:MyLanguage/Tech/News#contribute|অবদান রাখুন]] • [[m:Special:MyLanguage/Tech/News/2017/47|অনুবাদ করুন]] • [[m:Tech|সাহায্য পান]] • [[m:Talk:Tech/News|মতামত জানান]] • [[m:Global message delivery/Targets/Tech ambassadors|গ্রাহকত্ব পরিচালনা করুন]]।''
</div></div> <section end="technews-2017-W47"/> ১৯:১৮, ২০ নভেম্বর ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=17447294-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== New print to pdf feature for mobile web readers ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
'''New print to pdf feature for mobile web readers'''
The Readers web team will be deploying a new feature this week to make it [[mw:Reading/Web/Projects/Mobile_PDFs|easier to download PDF versions of articles on the mobile website]].
Providing better offline functionality was one of the highlighted areas from [[m:New_Readers/Offline|the research done by the New Readers team in Mexico, Nigeria, and India]]. The teams created a prototype for mobile PDFs which was evaluated by user research and community feedback. The [[m:New_Readers/Offline#Concept_testing_for_mobile_web|prototype evaluation]] received positive feedback and results, so development continued.
For the initial deployment, the feature will be available to Google Chrome browsers on Android. Support for other mobile browsers to come in the future. For Chrome, the feature will use the native Android print functionality. Users can choose to download a webpage as a PDF. [[mw:Reading/Web/Projects/Print_Styles#Mobile_Printing|Mobile print styles]] will be used for these PDFs to ensure optimal readability for smaller screens.
The feature is available starting Wednesday, Nov 15. For more information, see [[mw:Reading/Web/Projects/Mobile_PDFs|the project page on MediaWiki.org]].
{{Int:Feedback-thanks-title}}
</div> [[m:User:CKoerner (WMF)|CKoerner (WMF)]] ([[m:User talk:CKoerner (WMF)|talk]]) ২২:০৭, ২০ নভেম্বর ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:CKoerner_(WMF)/Mobile_PDF_distribution_list&oldid=17448927-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:CKoerner (WMF)@metawiki পাঠিয়েছেন -->
== [[m:Special:MyLanguage/Tech/News/2017/48|Tech News: 2017-48]] ==
<section begin="technews-2017-W48"/><div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
উইকিমিডিয়া কারিগরি সম্প্রদায় থেকে প্রকাশিত সাম্প্রতিক '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]'''। অনুগ্রহ করে এই পরিবর্তন সম্পর্কে অন্যান্য ব্যবহারকারীদের জানান। সকল পরিবর্তন আপনাকে প্রভাবিত করবে না। আরও [[m:Special:MyLanguage/Tech/News/2017/48|অনুবাদ]] উপলব্ধ রয়েছে।
'''সাম্প্রতিক পরিবর্তনসমূহ'''
* আপনি এখন mediawiki.org তে নতুন [[mw:Help:Extension:AdvancedSearch|উন্নত অনুসন্ধান ফাংশন বিটা বৈশিষ্ট্য]] পরীক্ষা করতে পারবেন। এটি কিছু বিশেষ অনুসন্ধানের ফাংশন ব্যবহার করা সহজ করে তোলে যা বেশিরভাগ সম্পাদকেরা জানেনই না যার অস্তিত্ব আছে। এটি এই সপ্তাহে জার্মান ও আরবি উইকিপিডিয়ায় আসবে। এটি পরে আরো উইকিতে আসবে। [https://lists.wikimedia.org/pipermail/wikitech-l/2017-November/089148.html]
* আপনি এখন [https://tools.wmflabs.org/ia-upload/commons/init ইন্টারনেট আর্কাইভ আপলোড সরঞ্জাম] দিয়ে বৃহৎ ফাইল আপলোড করতে পারবেন। পূর্বে ১০০ মেগাবাইটের বড় ফাইল আপলোড করা যেত না। [https://lists.wikimedia.org/pipermail/wikisource-l/2017-November/003365.html]
* আপনি এখন সকল উইকিতে [https://www.mediawiki.org/wiki/Skin:Timeless?useskin=timeless টাইমলেস আবরণ] ব্যবহার করতে পারবেন। আপনি [[Special:Preferences#mw-prefsection-rendering|বিশেষ:পছন্দ#Appearance]]-এ যেয়ে আবরণ পছন্দ করতে পারবেন। [https://phabricator.wikimedia.org/T154371]
'''এই সপ্তাহের পরিবর্তনসমূহ'''
* <span title="পৌনঃপুনিক উপাদান">[[File:Octicons-sync.svg|12px|link=]]</span> মিডিয়াউইকির [[mw:MediaWiki 1.31/wmf.10|নতুন সংস্করণ]] টেস্ট উইকি এবং MediaWiki.org সাইটে {{#time:j xg|2017-11-28|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে। অ-উইকিপিডিয়া ও কিছু উইকিপিডিয়ায় এটি {{#time:j xg|2017-11-29|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে। সকল উইকিতে এটি {{#time:j xg|2017-11-30|{{CURRENTCONTENTLANGUAGE}}}} থেকে উপলব্ধ হবে ([[mw:MediaWiki 1.31/Roadmap|পঞ্জিকা]])।
'''সভা'''
* <span title="পৌনঃপুনিক উপাদান">[[File:Octicons-sync.svg|12px|link=]]</span> আপনি সম্পাদক দলের সঙ্গে পরবর্তী সভায় যোগ দিতে পারেন। সভাকালীন, আপনি যে সমস্যাটি সবচেয়ে গুরুত্বপূর্ণ বলে মনে করেন তা উন্নয়নকারীদেরকে জানাতে পারেন। সভাটি [https://www.timeanddate.com/worldclock/fixedtime.html?hour=18&min=30&sec=0&year=2017&month=11&day=28 {{#time:j xg|2017-11-28|{{CURRENTCONTENTLANGUAGE}}}} তারিখে ১৯:৩০ টায় (ইউটিসি)] অনুষ্ঠিত হবে। জানুন [[mw:Editing team/Weekly triage meetings|কিভাবে যোগ দিতে হয়]]।
* <span title="উন্নত উপাদান">[[File:Octicons-tools.svg|15px|link=]]</span> আপনি IRC-তে প্রযুক্তিগত পরামর্শ সভায় যোগ দিতে পারেন। সভায় স্বেচ্ছাসেবক উন্নয়নকারীরা পরামর্শ পেতে প্রশ্ন করতে পারেন। সভাটি [http://www.timeanddate.com/worldclock/fixedtime.html?hour=16&min=00&sec=0&day=29&month=11&year=2017 {{#time:j xg|2017-11-29|{{CURRENTCONTENTLANGUAGE}}}} তারিখে ১৬:০০ টায় (ইউটিসি)] অনুষ্ঠিত হবে। জানুন [[mw:Technical Advice IRC Meeting|কিভাবে যোগ দিতে হয়]]।
''[[m:Special:MyLanguage/Tech/News/Writers|প্রযুক্তি সংবাদ লেখক]] দ্বারা প্রস্তুতকৃত ও [[m:Special:MyLanguage/User:MediaWiki message delivery|বট]] দ্বারা প্রকাশিত '''[[m:Special:MyLanguage/Tech/News|প্রযুক্তি সংবাদ]]''' • [[m:Special:MyLanguage/Tech/News#contribute|অবদান রাখুন]] • [[m:Special:MyLanguage/Tech/News/2017/48|অনুবাদ করুন]] • [[m:Tech|সাহায্য পান]] • [[m:Talk:Tech/News|মতামত জানান]] • [[m:Global message delivery/Targets/Tech ambassadors|গ্রাহকত্ব পরিচালনা করুন]]।''
</div></div> <section end="technews-2017-W48"/> ২০:৩০, ২৭ নভেম্বর ২০১৭ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=17468068-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Tell us what you think about the automatic links for Wiktionary ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hello all,
(Sorry for writing in English. Feel free to translate this message below.)
One year ago, the Wikidata team started deploying new automatic interwiki links for Wiktionaries. Today, the links for the main namespace are automatically displayed by a Mediawiki extension, and the links for other namespaces are stored in Wikidata. You can find [[d:Special:MyLanguage/Wikidata:Wiktionary/Sitelinks|the documentation here]] (feel free to help translating it in your language).
We would like to know if you encountered problems with the system, if you would have suggestions for further improvements. This could be for example:
* Some automatic links don’t work as expected
* Some problems you encountered with entering links (for non-main namespace) in Wikidata
* Some new features you’d like to have, related to links
To give feedback, you have two options:
* Let a message on [[d:Wikidata talk:Lexicographical data/Sitelinks|this talk page]]
* Let a message here. If you do so, please mention me with the <nowiki>{{ping}}</nowiki> template, so I can get a notification.
Our preferred languages are English, French and German, but you can also let a message in your own language if you feel more comfortable with it.
I’m looking forward for your feedback! [[:d:User:Lea Lacroix (WMDE)|Lea Lacroix (WMDE)]] ১০:২৩, ২৪ এপ্রিল ২০১৮ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/All_Wiktionaries&oldid=17968098-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== AdvancedSearch ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr">
৮ই মে থেকে [[mw:Special:MyLanguage/Help:Extension:AdvancedSearch|উচ্চতর অনুসন্ধান]] এই উইকিতে [[mw:Special:MyLanguage/Beta Features|বেটা বৈশিষ্ট্য]] হিসেবে যুক্ত হবে। এর মাধ্যমে বর্তমান [[Special:Search|অনুসন্ধান]] বাক্সটি আরও উন্নত হবে এবং আরও [[m:WMDE_Technical_Wishes/AdvancedSearch/Functional_scope|ভালোভাবে]] সবার জন্য ফলাফল প্রদর্শন করবে। উচ্চতর অনুসন্ধান প্রকল্পটি [[m:WMDE Technical Wishes/AdvancedSearch|উইকিমিডিয়া জার্মানির প্রযুক্তি দলের]] ইচ্ছায় হচ্ছে। আমরা সবাইকে বৈশিষ্ট্যটি পরীক্ষা করে দেখার অনুরোধ করছি এবং ভবিষ্যতে এটি আপনার কাজকে আরও সহজ করবে বলেই আমাদের বিশ্বাস। </div> [[m:User:Birgit Müller (WMDE)|Birgit Müller (WMDE)]] ১৪:৪৪, ৭ মে ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=WMDE_Technical_Wishes/Technical_Wishes_News_list_1&oldid=17995466-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Birgit Müller (WMDE)@metawiki পাঠিয়েছেন -->
== Lexicographical data is now available on Wikidata ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
Hello,
''Sorry for writing in English. Feel free to translate the content of this message below.''
After several years discussing about it, and one year of development and discussion with the communities, the development team of Wikimedia Germany has now released the first version of [[d:Wikidata:Lexicographical data|lexicographical data support on Wikidata]].
Since the start of Wikidata in 2012, the multilingual knowledge base was mainly focused on concepts: Q-items are related to a thing or an idea, not to the word describing it. Starting now, Wikidata stores a new type of data: words, phrases and sentences, in many languages, described in many languages. This information will be stored in new types of entities, called Lexemes, Forms and Senses.
The goal of lexicographical data on Wikidata is to provide a structured and machine-readable way to describe words and phrases in multiple languages, stored in a same place, reusable under CC-0. In the near future, this data will be available for Wiktionaries and other projects to reuse, as much as you want to.
For now, we’re at the first steps of this project: the new data structure has been released on Wikidata, and we’re looking for people to try it, and give us feedback on what is working or not. Participating to this project is the opportunity for you to have a voice in it, to make sure that your needs and requests are taken in account very early in the process, and to start populating Wikidata with words in your language!
Here’s how you can try lexicographical data on Wikidata:
* First of all, if you’re not familiar with the data model, I encourage you to have a look at [[d:Wikidata:Lexicographical data/Documentation|the documentation page]]. If you’re not familiar with Wikidata at all, I suggest [[d:Help:Contents|this page]] as a start point.
* You can also [https://www.wikidata.org/wiki/Special:AllPages?from=&to=&namespace=146 look at the Lexemes that already exists] (search features will be improved in the future).
* When you feel ready to create a word, go on [[d:Special:NewLexeme]].
* If some properties that you need are missing, you can [[d:Wikidata:Property proposal/Lexemes|suggest them on this page]] (if you’re not sure how to do it, just let a message on the talk page and someone will help you).
* The main discussion page is [[d:Wikidata:Lexicographical data]]. Here, you can ask for help, suggest ways to organize the data, but also leave feedback: if you encounter any bug or issue, let us know. We’re looking especially to know what are the most important features for you to be worked on next.
In any case, feel free to contact me if you have a question or problem, I’ll be very happy to help.
Cheers, [[:d:User:Lea Lacroix (WMDE)|Lea Lacroix (WMDE)]] ১২:২০, ২৩ মে ২০১৮ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/All_Wiktionaries&oldid=18070068-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== উইকিভ্রমণ অনুমোদিত হয়েছে ==
সুধী, আজ [[voy:|বাংলা উইকিভ্রমণ]] ভাষা কমিটি থেকে [[:meta:Requests for new languages/Wikivoyage Bengali|অনুমোদন পেয়েছে]]। এখন বাংলা উইকিভ্রমণ সাইট তৈরির কাজ চলছে। দীর্ঘ ৬ মাস যাবত যারা ইনকিউবেটরের বাংলা উইকিভ্রমণে অবদান রেখে এই অনুমোদন পেতে কাজ করেছেন তাঁদের সকলকে ধন্যবাদ জানাই। সকল অবদানকারীর তালিকা [https://tools.wmflabs.org/meta/catanalysis/index.php?cat=0&title=Wy/bn&wiki=incubatorwiki এখানে পাওয়া যাবে] ([http://archive.is/zv6Ms স্থায়ী])। --[[ব্যবহারকারী:Aftabuzzaman|আফতাব]] ([[ব্যবহারকারী আলাপ:Aftabuzzaman|আলাপ]]) ১৭:৫৫, ৪ জুন ২০১৮ (ইউটিসি)
:https://bn.wikivoyage.org/ তৈরি হয়েছে। --[[ব্যবহারকারী:Aftabuzzaman|আফতাব]] ([[ব্যবহারকারী আলাপ:Aftabuzzaman|আলাপ]]) ২১:৫০, ৭ জুন ২০১৮ (ইউটিসি)
== Update on page issues on mobile web ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
'''Update on page issues on mobile web'''
{{int:please-translate}}
Hi everyone. The [[mw:Reading/Web/Team|Readers web team]] has recently begun working on exposing issue templates on the mobile website. Currently, details about issues with page content are generally hidden on the mobile website. This leaves readers unaware of the reliability of the pages they are reading. The goal of this project is to improve awareness of particular issues within an article on the mobile web. We will do this by changing the visual styling of page issues.
So far, we have [[mw:Reading/Web/Projects/Mobile Page Issues|drafted a proposal on the design and implementation]] of the project. We were also able to run [[mw:Reading/Web/Projects/Mobile Page Issues/Research Results|user testing on the proposed designs]]. The tests so far have positive results. Here is a quick summary of what we learned:
* The new treatment increases awareness of page issues among participants. This is true particularly when they are in a more evaluative/critical mode.
* Page issues make sense to readers and they understand how they work
* Readers care about page issues and consider them important
* Readers had overwhelmingly positive sentiments towards Wikipedia associated with learning about page issues
Our next step would be to start implementing these changes. We wanted to reach out to you for any concerns, thoughts, and suggestions you might have before beginning development. Please [[mw:Reading/Web/Projects/Mobile Page Issues|visit the project page]] where we have more information and mockups of how this may look. Please [[mw:Talk:Reading/Web/Projects/Mobile Page Issues|leave feedback on the talk page]].
</div> [[m:User:CKoerner (WMF)|CKoerner (WMF)]] ([[m:User talk:CKoerner (WMF)|talk]]) ২০:৫৮, ১২ জুন ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:CKoerner_(WMF)/Sandbox&oldid=18120916-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:CKoerner (WMF)@metawiki পাঠিয়েছেন -->
== Global preferences are available ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr">
বৈশ্বিক পছন্দগুলি এখন উপলব্ধ, আপনি নতুন [[Special:GlobalPreferences|বৈশ্বিক পছন্দের পাতা]] পরিদর্শন করে আপনার পছন্দ নির্ধারণ করতে পারেন। কীভাবে এটি ব্যবহার করবেন সে সম্পর্কে তথ্য পেতে এবং [[mw:Help:Extension:GlobalPreferences|মতামত দিতে]] [[mw:Help talk:Extension:GlobalPreferences|mediawiki.org পরিদর্শন করুন]]। -- [[User:Keegan (WMF)|Keegan (WMF)]] ([[m:User talk:Keegan (WMF)|আলাপ]])
</div> ১৯:১৯, ১০ জুলাই ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=17968247-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Keegan (WMF)@metawiki পাঠিয়েছেন -->
== Consultation on the creation of a separate user group for editing sitewide CSS/JS ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
''({{int:please-translate}})''
Hi all,
I'm preparing a change in who can edit sitewide CSS/JS pages. (These are pages like <code dir="ltr">MediaWiki:Common.css</code> and <code dir="ltr">MediaWiki:Vector.js</code> which are executed in the browser of all readers and editors.) Currently all administrators are able to edit these pages, which poses a serious and unnecessary security risk. Soon, a dedicated, smaller user group will take over this task. Your community will be able to decide who belongs in this group, so this should mean very little change for you. You can find out more and provide feedback at [[m:Special:MyLanguage/Creation of separate user group for editing sitewide CSS/JS|the consultation page on Meta]]. If you are involved in maintaining CSS/JS code, or policymaking around adminship requests, please give it a look!
Thanks!
<br/><span dir="ltr">[[m:User:Tgr|Tgr]] ([[m:User talk:Tgr|talk]]) ০৮:৪৫, ১২ জুলাই ২০১৮ (ইউটিসি) <small>(via [[m:Special:MyLanguage/Global_message_delivery|global message delivery]])</small></span>
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Nonechnical_Village_Pumps_distribution_list&oldid=18199925-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Tgr@metawiki পাঠিয়েছেন -->
== New user group for editing sitewide CSS/JS ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
''({{int:please-translate}})''
Hi all!
To improve the security of our readers and editors, permission handling for CSS/JS pages has changed. (These are pages like <code dir="ltr">MediaWiki:Common.css</code> and <code dir="ltr">MediaWiki:Vector.js</code> which contain code that is executed in the browsers of users of the site.)
A new user group, <code dir="ltr">[[m:Special:MyLanguage/Interface administrators|interface-admin]]</code>, has been created.
Starting four weeks from now, only members of this group will be able edit CSS/JS pages that they do not own (that is, any page ending with <code dir="ltr">.css</code> or <code dir="ltr">.js</code> that is either in the <code dir="ltr">MediaWiki:</code> namespace or is another user's user subpage).
You can learn more about the motivation behind the change [[m:Special:MyLanguage/Creation of separate user group for editing sitewide CSS/JS|here]].
Please add users who need to edit CSS/JS to the new group (this can be done the same way new administrators are added, by stewards or local bureaucrats).
This is a dangerous permission; a malicious user or a hacker taking over the account of a careless interface-admin can abuse it in far worse ways than admin permissions could be abused. Please only assign it to users who need it, who are trusted by the community, and who follow common basic password and computer security practices (use strong passwords, do not reuse passwords, use two-factor authentication if possible, do not install software of questionable origin on your machine, use antivirus software if that's a standard thing in your environment).
Thanks!
<br/><span dir="ltr">[[m:User:Tgr|Tgr]] ([[m:User talk:Tgr|talk]]) ১৩:০৮, ৩০ জুলাই ২০১৮ (ইউটিসি) <small>(via [[m:Special:MyLanguage/Global_message_delivery|global message delivery]])</small></span>
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=17968247-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Tgr@metawiki পাঠিয়েছেন -->
== Wiktionary Cognate Dashboard ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
[[File:Screenshot of Hubs on Wiktionary Cognate Dashboard.png|thumb|Screenshot of the Hub view]]
''Sorry for writing this message in English. Feel free to help translate it below.''
Hello all,
A few months ago, we asked you for feedback about [[d:Wikidata:Wiktionary/Sitelinks|Cognate]], the system allowing interwikilinks between Wiktionaries (on main namespace). Several community members gave some suggestions, one of them was to provide statistics about these interwikilinks.
The Wikidata team is pleased to present you the '''[https://wdcm.wmflabs.org/Wiktionary_CognateDashboard/ Wiktionary Cognate Dashboard]''', a website presenting a lot of interesting information about how Wiktionaries are connected to each others. You can find there, for example:
* the most interlinked Wiktionary entries not having a page on your Wiktionary
* the number of interlinks between each possible pair of Wiktionaries
* visualizations of the relationships between different Wiktionaries
To learn more about the tool, you can have a look at [[m:Wiktionary Cognate Dashboard|the documentation]] (please help us translating it in your language!). The interface of the tool itself can also be translated in other languages by [[m:Wiktionary Cognate Dashboard/Interface|using this page]].
If you find a bug, please let a comment on [[phab:T166487|this Phabricator task]] or ping me onwiki. Thanks a lot, [[:d:User:Lea Lacroix (WMDE)|Lea Lacroix (WMDE)]] ১৩:০৮, ১৪ আগস্ট ২০১৮ (ইউটিসি)
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery/Wiktionary&oldid=18299586-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Lea Lacroix (WMDE)@metawiki পাঠিয়েছেন -->
== Editing of sitewide CSS/JS is only possible for interface administrators from now ==
''({{int:please-translate}})''
<div lang="en" dir="ltr" class="mw-content-ltr">
Hi all,
as [[m:Special:MyLanguage/Creation of separate user group for editing sitewide CSS/JS/announcement 2|announced previously]], permission handling for CSS/JS pages has changed: only members of the <code>[[m:Special:MyLanguage/Interface administrators|interface-admin]]</code> ({{int:group-interface-admin}}) group, and a few highly privileged global groups such as stewards, can edit CSS/JS pages that they do not own (that is, any page ending with .css or .js that is either in the MediaWiki: namespace or is another user's user subpage). This is done to improve the security of readers and editors of Wikimedia projects. More information is available at [[m:Special:MyLanguage/Creation of separate user group for editing sitewide CSS/JS|Creation of separate user group for editing sitewide CSS/JS]]. If you encounter any unexpected problems, please contact me or file a bug.
Thanks!<br />
[[m:User:Tgr|Tgr]] ([[m:User talk:Tgr|talk]]) ১২:৩৯, ২৭ আগস্ট ২০১৮ (ইউটিসি) <small>(via [[m:Special:MyLanguage/Global_message_delivery|global message delivery]])</small>
</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18258712-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Tgr@metawiki পাঠিয়েছেন -->
== Read-only mode for up to an hour on 12 September and 10 October ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr"><div class="plainlinks">
[[:m:Special:MyLanguage/Tech/Server switch 2018|অন্য আরেকটি ভাষায় এই বার্তাটি পড়ুন]] • {{int:please-translate}}
[[foundation:|উইকিমিডিয়া ফাউন্ডেশন]] তার গৌণ উপাত্ত কেন্দ্রটি পরীক্ষা করবে। এটি নিশ্চিত করবে যে উইকিপিডিয়া এবং উইকিমিডিয়ার অন্যান্য উইকিসমূহ এমনকি একটি দুর্যোগের পরেও অনলাইনে থাকবে। সবকিছু কাজ করছে তা নিশ্চিত করতে, উইকিমিডিয়ার প্রযুক্তি বিভাগ একটি পরিকল্পিত পরীক্ষা পরিচালনা করবে। এই পরীক্ষাটি প্রদর্শন করবে যে তাঁদের নির্ভরযোগ্যভাবে একটি উপাত্ত কেন্দ্র থেকে অন্য উপাত্ত কেন্দ্রে পরিবর্তন করা যাবে কিনা। এটির জন্য অনেক দলকে পরীক্ষার জন্য প্রস্তুত করা প্রয়োজন এবং অপ্রত্যাশিত সমস্যা সমাধানের জন্য প্রস্তুত থাকা প্রয়োজন।
তারা গৌণ উপাত্ত কেন্দ্রে '''১২ সেপ্টেম্বর, বুধবারে''' সকল ট্রাফিক নিয়ে যাবে।
'''১০ অক্টোবর, বুধবারে''', তাঁরা আবার প্রাথমিক উপাত্ত কেন্দ্রে ফিরে আসবে।
দুর্ভাগ্যবশত, [[mw:Manual:What is MediaWiki?|মিডিয়াউইকির]] কিছু সীমাবদ্ধতার কারণে, ঐ দুটি পরিবর্তনের সময় সব সম্পাদনা অবশ্যই বন্ধ রাখতে হবে। এই ব্যাঘাত ঘটানোর জন্য আমরা ক্ষমাপ্রার্থী, এবং আমরা ভবিষ্যতে এটিকে হ্রাস করার জন্য কাজ করছি।
'''সব উইকিতে অল্প সময়ের জন্য, আপনি সম্পাদনা করতে পারবেন না তবে আপনি উইকি পড়তে সক্ষম হবেন।'''
*আপনি ১২ সেপ্টেম্বর, বুধবার ও ১০ অক্টোবর, বুধবারে প্রায় আধা ঘণ্টার মত সম্পাদনা করতে পারবেন না। পরীক্ষাটি শুরু হবে বাংলাদেশ সময় [https://www.timeanddate.com/worldclock/fixedtime.html?iso=20170503T14 রাত ৮টায়] (১৪:০০ ইউটিসি, পশ্চিমবঙ্গে রাত ৭টা ৩০ মিনিটে)।
*এই সময়ে আপনি যদি সম্পাদনা বা সংরক্ষণ করার চেষ্টা করেন, তাহলে আপনি একটি ত্রুটির বার্তা দেখতে পাবেন। আমরা আশা করি যে কোন সম্পাদনা এই সময়ের মধ্যে নষ্ট হবে না, কিন্তু আমরা তার নিশ্চয়তা দিতে পারছি না। আপনি যদি ত্রুটির বার্তা দেখতে পান, তাহলে অনুগ্রহ করে অপেক্ষা করুন যতক্ষণ না সবকিছু স্বাভাবিক অবস্থায় ফিরে আসছে। এরপর আপনি আপনার সম্পাদনা সংরক্ষণ করতে সক্ষম হবেন। কিন্তু, আমরা আপনাকে প্রথমে আপনার পরিবর্তনের একটি অনুলিপি করে রাখার সুপারিশ করছি।
''অন্যান্য প্রভাব'':
*পটভূমির কাজ ধীর হবে এবং কিছু নাও কাজ করতে পারে। লাল লিঙ্ক স্বাভাবিকের মত দ্রুত হালনাগাদ নাও হতে পারে। আপনি যদি একটি নিবন্ধ তৈরি করেন যা ইতিমধ্যে অন্য কোথাও সংযুক্ত আছে, সেক্ষেত্রে লিংক স্বাভাবিকের চেয়ে বেশি সময় ধরে লাল থাকবে। কিছু দীর্ঘ চলমান স্ক্রিপ্ট বন্ধ করতে হবে।
*১০ সেপ্টেম্বর ২০১৮ ও ৮ অক্টোবর ২০১৮ এই দুই সপ্তাহের সময়কালীন কোন কোড হালনাগাদ করা হবে না। কোন অ-অপরিহার্য কোড স্থাপন সঞ্চালিত হবে না।
যদি প্রয়োজন হয় তাহলে এই প্রকল্পটি স্থগিত করা হতে পারে। আপনি [[wikitech:Switch Datacenter#Schedule for 2018 switch|wikitech.wikimedia.org তে সময়সূচী পড়তে পারেন]]। যেকোনো পরিবর্তন সময়সূচীতে ঘোষণা করা হবে। এই সম্পর্কে আরো বিজ্ঞপ্তি দেয়া হবে। '''দয়া করে আপনার সম্প্রদায়কে এই তথ্যটি জানান।''' /<span dir=ltr>[[m:User:Johan (WMF)|User:Johan(WMF)]] ([[m:User talk:Johan (WMF)|talk]])</span>
</div></div> ১৩:৩৩, ৬ সেপ্টেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18333489-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Reminder: No editing for up to an hour on 10 October ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr"><div class="plainlinks">
[[:m:Special:MyLanguage/Tech/Server switch 2018 2|Read this message in another language]] • {{int:please-translate}}
The [https://wikimediafoundation.org/ Wikimedia Foundation] is testing its secondary data center. This will make sure that Wikipedia and the other Wikimedia wikis can stay online even after a disaster.
They switched all traffic to the secondary data center on 12 September 2018.
On '''Wednesday, 10 October 2018''', they will switch back to the primary data center.
Unfortunately, because of some limitations in [[mw:Manual:What is MediaWiki?|MediaWiki]], all editing must stop while we switch.
'''You will be able to read, but not edit, all wikis for a short period of time.'''
You will not be able to edit for up to an hour on Wednesday, 10 October. The test will start a bit after [https://www.timeanddate.com/worldclock/fixedtime.html?iso=20181010T14 14:00 UTC] (15:00 BST, 16:00 CEST, 10:00 EDT, 07:00 PDT, 23:00 JST). If you try to edit or save during these times, you will see an error message.
This project may be postponed if necessary. You can [[wikitech:Switch Datacenter#Schedule for 2018 switch|read the schedule at wikitech.wikimedia.org]]. Any changes will be announced in the schedule. '''Please share this information with your community.''' /<span dir=ltr>[[m:User:Johan (WMF)|User:Johan(WMF)]] ([[m:User talk:Johan (WMF)|talk]])</span>
</div></div> ১২:০৩, ৪ অক্টোবর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18363900-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== পশ্চিমবঙ্গ ব্যবহারকারী দলের পক্ষ থেকে আহ্বান ==
সুধী, [[:meta:West Bengal Wikimedians|পশ্চিমবঙ্গ ব্যবহারকারী দলের]] পক্ষ থেকে পশ্চিমবঙ্গের উইকিমিডিয়া সম্প্রদায়ের সকলকে ২০১৯ সালের কার্যকলাপের জন্য প্রকল্প জমা দেওয়ার আহ্বান করা হচ্ছে। [[:meta:West Bengal Wikimedians/Requests|এই পাতায়]] আপনারা আগামী এক বছরে কি উইকিমিডিয়া প্রকল্প রূপায়িত করতে চান, তাতে কত আনুমানিক খরচ হতে পারে, এই প্রকল্প থেকে উইকিমিডিয়ার কি লক্ষ্য পূরণ হবে, ইত্যাদি বিস্তারিত ভাবে জানান। আপনাদের প্রস্তাবিত প্রকল্প বিবেচনা করেই ইউজার গ্রুপের তরফ থেকে উইকিমিডিয়া ফাউন্ডেশনের নিকট হতে র্যাপিড গ্র্যান্টের আবেদন করা হবে এবং তা গৃহীত হলে আপনাদের প্রকল্পের জন্য অর্থ বরাদ্দ হবে। দয়া করে মনে রাখবেন, আনুমানিক লক্ষ্য পূরণ করে প্রকল্প সম্পন্ন বলে ঘোষণা করলে ও সমস্ত রসিদের স্ক্যান জমা দিলে তবেই ইউজার গ্রুপ থেকে প্রকল্প খরচ পাঠিয়ে দেওয়া হবে, তার আগে নয়। যে কোন কারণে তা পূরণে ব্যর্থ হলে, ইউজার গ্রুপ খরচ পাঠাতে দায়বদ্ধ থাকবে না। আগামী এক মাস ধরে আপনাদের বিস্তারিত প্রকল্প জমা নেওয়া হবে। ধন্যবাদান্তে, -- পশ্চিমবঙ্গ ব্যবহারকারী দলের পক্ষ থেকে [[ব্যবহারকারী:Bodhisattwa|বোধিসত্ত্ব]] ([[ব্যবহারকারী আলাপ:Bodhisattwa|আলাপ]]) ১৬:৪১, ১২ অক্টোবর ২০১৮ (ইউটিসি)
== The Community Wishlist Survey ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr"><div class="plainlinks">
The Community Wishlist Survey. {{Int:Please-translate}}.
Hey everyone,
The Community Wishlist Survey is the process when the Wikimedia communities decide what the Wikimedia Foundation [[m:Community Tech|Community Tech]] should work on over the next year.
The Community Tech team is focused on tools for experienced Wikimedia editors. You can post technical proposals from now until 11 November. The communities will vote on the proposals between 16 November and 30 November. You can read more on the [[m:Special:MyLanguage/Community Wishlist Survey 2019|wishlist survey page]].
<span dir=ltr>/[[m:User:Johan (WMF)|User:Johan (WMF)]]</span></div></div> ১১:০৫, ৩০ অক্টোবর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18458512-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Change coming to how certain templates will appear on the mobile web ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
'''Change coming to how certain templates will appear on the mobile web'''
{{int:please-translate}}
[[File:Page_issues_-_mobile_banner_example.jpg|thumb|Example of improvements]]
Hello,
In a few weeks the Readers web team will be changing how some templates look on the mobile web site. We will make these templates more noticeable when viewing the article. We ask for your help in updating any templates that don't look correct.
What kind of templates? Specifically templates that notify readers and contributors about issues with the content of an article – the text and information in the article. Examples like [[wikidata:Q5962027|Template:Unreferenced]] or [[Wikidata:Q5619503|Template:More citations needed]]. Right now these notifications are hidden behind a link under the title of an article. We will format templates like these (mostly those that use Template:Ambox or message box templates in general) to show a short summary under the page title. You can tap on the "Learn more" link to get more information.
For template editors we have [[mw:Recommendations_for_mobile_friendly_articles_on_Wikimedia_wikis#Making_page_issues_(ambox_templates)_mobile_friendly|some recommendations on how to make templates that are mobile-friendly]] and also further [[mw:Reading/Web/Projects/Mobile_Page_Issues|documentation on our work so far]].
If you have questions about formatting templates for mobile, [[mw:Talk:Reading/Web/Projects/Mobile_Page_Issues|please leave a note on the project talk page]] or [https://phabricator.wikimedia.org/maniphest/task/edit/form/1/?projects=Readers-Web-Backlog file a task in Phabricator] and we will help you.
{{Int:Feedback-thanks-title}}
</div> [[m:User:CKoerner (WMF)|CKoerner (WMF)]] ([[m:User talk:CKoerner (WMF)|talk]]) ১৯:৩৪, ১৩ নভেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18543269-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:CKoerner (WMF)@metawiki পাঠিয়েছেন -->
== Community Wishlist Survey vote ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr"><div class="plainlinks">
The Community Wishlist Survey. {{Int:Please-translate}}.
Hey everyone,
The Community Wishlist Survey is the process when the Wikimedia communities decide what the Wikimedia Foundation [[m:Community Tech|Community Tech]] should work on over the next year.
The Community Tech team is focused on tools for experienced Wikimedia editors. The communities have now posted a long list of technical proposals. You can vote on the proposals from now until 30 November. You can read more on the [[m:Special:MyLanguage/Community Wishlist Survey 2019|wishlist survey page]].
<span dir=ltr>/[[m:User:Johan (WMF)|User:Johan (WMF)]]</span></div></div> ১৮:১৩, ২২ নভেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18543269-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Advanced Search ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
[[m:WMDE_Technical_Wishes/AdvancedSearch|Advanced Search]] will become a default feature on your wiki on November 28. This new interface allows you to perform specialized searches on the [[Special:Search|search page]], even if you don’t know any [[mw:Special:MyLanguage/Help:CirrusSearch|search syntax]]. Advanced Search originates from the [[m:WMDE_Technical_Wishes|German Community’s Technical Wishes project]]. It's already a default feature on German, Arabic, Farsi and Hungarian Wikipedia. Besides, more than 40.000 users across all wikis have tested the beta version. Feedback is welcome on the [[mw:Help talk:Extension:AdvancedSearch|central feedback page]].</div> [[m:User:Johanna Strodt (WMDE)|Johanna Strodt (WMDE)]] ([[m:User talk:Johanna Strodt (WMDE)|talk]]) ১০:৫৭, ২৬ নভেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=WMDE_Technical_Wishes/Technical_Wishes_News_list_1&oldid=17995466-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johanna Strodt (WMDE)@metawiki পাঠিয়েছেন -->
== ইংরেজি থেকে বাংলা ==
Best use before 2 years after
== New Wikimedia password policy and requirements ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
{{int:please-translate}}
The Wikimedia Foundation security team is implementing a new [[m:Password policy|password policy and requirements]]. [[mw:Wikimedia_Security_Team/Password_strengthening_2019|You can learn more about the project on MediaWiki.org]].
These new requirements will apply to new accounts and privileged accounts. New accounts will be required to create a password with a minimum length of 8 characters. Privileged accounts will be prompted to update their password to one that is at least 10 characters in length.
These changes are planned to be in effect on December 13th. If you think your work or tools will be affected by this change, please let us know on [[mw:Talk:Wikimedia_Security_Team/Password_strengthening_2019|the talk page]].
{{Int:Feedback-thanks-title}}
</div> [[m:User:CKoerner (WMF)|CKoerner (WMF)]] ([[m:User talk:CKoerner (WMF)|talk]]) ২০:০২, ৬ ডিসেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18639017-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:CKoerner (WMF)@metawiki পাঠিয়েছেন -->
== New Wikimedia password policy and requirements ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
{{int:please-translate}}
The Wikimedia Foundation security team is implementing a new [[m:Password policy|password policy and requirements]]. [[mw:Wikimedia_Security_Team/Password_strengthening_2019|You can learn more about the project on MediaWiki.org]].
These new requirements will apply to new accounts and privileged accounts. New accounts will be required to create a password with a minimum length of 8 characters. Privileged accounts will be prompted to update their password to one that is at least 10 characters in length.
These changes are planned to be in effect on December 13th. If you think your work or tools will be affected by this change, please let us know on [[mw:Talk:Wikimedia_Security_Team/Password_strengthening_2019|the talk page]].
{{Int:Feedback-thanks-title}}
</div> [[m:User:CKoerner (WMF)|CKoerner (WMF)]] ([[m:User talk:CKoerner (WMF)|talk]]) ২১:২১, ৬ ডিসেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:CKoerner_(WMF)/Sandbox&oldid=18693867-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:CKoerner (WMF)@metawiki পাঠিয়েছেন -->
== Selection of the Tremendous Wiktionary User Group representative to the Wikimedia Summit 2019 ==
Dear all,
Sorry for posting this message in English and last minute notification. The [[:m:Tremendous Wiktionary User Group|Tremendous Wiktionary User Group]] could send one representative to the [[:m:Wikimedia Summit 2019|Wikimedia Summit 2019]] (formerly "Wikimedia Conference"). The Wikimedia Summit is a yearly conference of all organizations affiliated to the Wikimedia Movement (including our Tremendous Wiktionary User Group). It is a great place to talk about Wiktionary needs to the chapters and other user groups that compose the Wikimedia movement.
For context, there is a [[:m:Wikimedia Conference 2018/Further reports/Tremendous Wiktionary User Group|short report on what happened last year]]. The deadline is very close to 24 hrs. The last date for registration is 17 December 2018. As a last minute effort, there is a '''[[:m:Tremendous Wiktionary User Group/Wikimedia Summit 2019|page on meta to decide who will be the representative of the user group to the Wikimedia Summit]]''' created.
Please feel free to ask any question on the [https://lists.wikimedia.org/mailman/listinfo/wiktionary-l wiktionary-l] mailing list or on the [[:m:Talk:Wiktionary/Tremendous Wiktionary User Group|talk page]].
For the [[:m:Tremendous Wiktionary User Group|Tremendous Wiktionary User Group]],
-- [[User:Balajijagadesh|Balajijagadesh]] ০৫:৫৬, ১৬ ডিসেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery/Wiktionary&oldid=18299588-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:KCVelaga@metawiki পাঠিয়েছেন -->
== Invitation from Wiki Loves Love 2019 ==
<div lang="en" dir="ltr" class="mw-content-ltr">
{{int:please-translate}}
[[File:WLL Subtitled Logo (transparent).svg|right|frameless]]
Love is an important subject for humanity and it is expressed in different cultures and regions in different ways across the world through different gestures, ceremonies, festivals and to document expression of this rich and beautiful emotion, we need your help so we can share and spread the depth of cultures that each region has, the best of how people of that region, celebrate love.
[[:c:Commons:Wiki Loves Love|Wiki Loves Love (WLL)]] is an international photography competition of Wikimedia Commons with the subject love testimonials happening in the month of February.
The primary goal of the competition is to document love testimonials through human cultural diversity such as monuments, ceremonies, snapshot of tender gesture, and miscellaneous objects used as symbol of love; to illustrate articles in the worldwide free encyclopedia Wikipedia, and other Wikimedia Foundation (WMF) projects.
The theme of 2019 iteration is '''''Celebrations, Festivals, Ceremonies and rituals of love.'''''
Sign up your affiliate or individually at [[:c:Commons:Wiki Loves Love 2019/Participants|Participants]] page.
To know more about the contest, check out our [[:c:Commons:Wiki Loves Love 2019|Commons Page]] and [[:c:Commons:Wiki Loves Love 2018/FAQ|FAQs]]
There are several prizes to grab. Hope to see you spreading love this February with Wiki Loves Love!
Kind regards,
[[:c:Commons:Wiki Loves Love 2018/International Team|Wiki Loves Love Team]]
Imagine... the sum of all love!
</div>
--[[ব্যবহারকারী:MediaWiki message delivery|MediaWiki message delivery]] ([[ব্যবহারকারী আলাপ:MediaWiki message delivery|আলাপ]]) ১০:১২, ২৭ ডিসেম্বর ২০১৮ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18639017-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Tiven2240@metawiki পাঠিয়েছেন -->
== FileExporter beta feature ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr">
[[File:Logo for FileExporter.svg|thumb|Coming soon: the beta feature [[m:WMDE_Technical_Wishes/Move_files_to_Commons|FileExporter]]]]
A new beta feature will soon be released on all wikis: The [[m:WMDE_Technical_Wishes/Move_files_to_Commons|FileExporter]]. It allows exports of files from a local wiki to Wikimedia Commons, including their file history and page history. Which files can be exported is defined by each wiki's community: '''Please check your wiki's [[m:WMDE_Technical_Wishes/Move_files_to_Commons/Configuration file documentation|configuration file]]''' if you want to use this feature.
The FileExporter has already been a beta feature on [https://www.mediawiki.org mediawiki.org], [https://meta.wikimedia.org meta.wikimedia], deWP, faWP, arWP, koWP and on [https://wikisource.org wikisource.org]. After some functionality was added, it's now becoming a beta feature on all wikis. Deployment is planned for January 16. More information can be found [[m:WMDE_Technical_Wishes/Move_files_to_Commons|on the project page]].
As always, feedback is highly appreciated. If you want to test the FileExporter, please activate it in your [[Special:Preferences#mw-prefsection-betafeatures|user preferences]]. The best place for feedback is the [[mw:Help_talk:Extension:FileImporter|central talk page]]. Thank you from Wikimedia Deutschland's [[m:WMDE Technical Wishes|Technical Wishes project]].
</div> [[User:Johanna Strodt (WMDE)|Johanna Strodt (WMDE)]] ০৯:৪১, ১৪ জানুয়ারি ২০১৯ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=WMDE_Technical_Wishes/Technical_Wishes_News_list_all_village_pumps&oldid=18782700-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johanna Strodt (WMDE)@metawiki পাঠিয়েছেন -->
== No editing for 30 minutes 17 January ==
<div lang="en" dir="ltr" class="mw-content-ltr">You will '''not be able to edit''' the wikis for up to 30 minutes on '''[https://www.timeanddate.com/worldclock/fixedtime.html?iso=20190117T07 17 January 07:00 UTC]'''. This is because of a database problem that has to be fixed immediately. You can still read the wikis. Some wikis are not affected. They don't get this message. You can see which wikis are '''not''' affected [[:m:User:Johan (WMF)/201901ReadOnlyPage|on this page]]. Most wikis are affected. The time you can't edit might be shorter than 30 minutes. /[[User:Johan (WMF)|Johan (WMF)]]</div> ১৪:৪৭, ১৬ জানুয়ারি ২০১৯ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=User:Johan_(WMF)/201901ReadOnly/Targets&oldid=18788945-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== আলোচনা নিয়ে আমাদের সাথে আলোচনা করুন ==
<div class="plainlinks mw-content-ltr" lang="bn" dir="ltr">
[[File:OOjs_UI_icon_speechBubbles-rtl.svg|alt="আইকন দুটি বক্তৃতা বুদবুদ চিত্রিত করতে"|frameless|right|120px]]
{{int:please-translate}}
উইকিমিডিয়া ফাউন্ডেশন [[mw:Talk pages consultation 2019|যোগাযোগ সম্পর্কে বৈশ্বিক পরামর্শ]] নিতে পরিকল্পনা করছে। এর লক্ষ্য হচ্ছে যোগাযোগের জন্য সরঞ্জামগুলিকে উন্নত করতে উইকিমিডিয়ান এবং উইকি-মনস্ক ব্যক্তিদের একত্রিত করা।
আমরা চাই অভিজ্ঞতা, দক্ষতা বা ডিভাইস নির্বিশেষে সব অবদান রাখা উইকিসমূহ একে অপরের সাথে কথা বলতে সক্ষম হোক।
আমরা যতটা সম্ভব উইকিমিডিয়া সম্প্রদায়ের বিভিন্ন অংশ থেকে ইনপুট খুঁজছি। এটি একাধিক প্রকল্প, একাধিক ভাষা, এবং একাধিক দৃষ্টিকোণ থেকে হতে পারে।
আমরা বর্তমানে পরামর্শ নেয়ার পরিকল্পনা করছি। আমাদের আপনার সাহায্য দরকার।
'''আমাদের স্বেচ্ছাসেবক প্রয়োজন যারা তাদের সম্প্রদায় বা ব্যবহারকারী দলের সাথে কথা বলতে সাহায্য করবে।'''
আপনি আপনার উইকিতে একটি আলোচনা আয়োজন করে, বা বিদ্যমান একটিতে অংশগ্রহণ করে সাহায্য করতে পারেন। কি করতে হবে এখানে তা দেয়া হল:
# প্রথমে, [[mw:Talk pages consultation 2019/Participant group sign-up|এখানে আপনার দল নিবন্ধিত করুন অথবা একটি ইতিমধ্যে বিদ্যমান কিনা তা পরীক্ষা করুন]]।
# যদি কোন দলের অস্তিত্ব না থাকে, তবে আপনার গোষ্ঠীর অন্যান্য ব্যক্তিদের কাছ থেকে তথ্য সংগ্রহের জন্য একটি পৃষ্ঠা তৈরি করুন (অথবা আলোচনাসভায় একটি অনুচ্ছেদ, অথবা একটি ই-মেইল থ্রেড তৈরি করুন - আপনার গোষ্ঠীর জন্য যেটি ভালো মনে করেন)। এটি কোনো ভোট বা সিদ্ধান্ত গ্রহণের আলোচনা নয়: আমরা কেবল প্রতিক্রিয়া সংগ্রহ করছি।
# তারপর যোগাযোগের প্রক্রিয়া সম্পর্কে তারা কী ভাবছেন তা জিজ্ঞাসা করুন। লোকেরা কীভাবে উইকি-র ভিতরে এবং বাইরে একে অপরের সাথে যোগাযোগ করে সে সম্পর্কে গল্প এবং অন্যান্য তথ্য আমরা শুনতে চাই। এই পাঁচটি প্রশ্ন জিজ্ঞাসা করতে বিবেচনা করুন:
## যখন আপনি আপনার সম্প্রদায়ের সাথে একটি বিষয় নিয়ে আলোচনা করতে চান, কোন সরঞ্জাম আপনার কাজে আসে এবং কোন সমস্যাগুলি আপনাকে বাধা দেয়?
## কীভাবে নতুনরা আলোচনার পৃষ্ঠাগুলি ব্যবহার করে এবং কী তাঁদের এটি ব্যবহার করা থেকে বাধা দেয়?
## আপনার সম্প্রদায়ে আলোচনার পৃষ্ঠাগুলিতে অন্যরা কি নিয়ে সমস্যায় পড়েন?
## এমন কি আছে যা আপনি আলোচনার পাতাগুলিতে করতে চান, কিন্তু প্রযুক্তিগত সীমাবদ্ধতার কারণে করতে পারছেন না?
## একটি "উইকি আলোচনা"-এর গুরুত্বপূর্ণ দিকগুলি কি কি?
# সর্বশেষ, দয়া করে [[mw:Talk:Talk pages consultation 2019|Mediawiki.org সাইটে আলাপ পাতার জন্য পরামর্শ ২০১৯]] পাতায় যান ও আপনি আপনার দল থেকে কি শিখেছেন তা প্রতিবেদন করুন। আলোচনা প্রকাশ্যে উপলব্ধ হলে দয়া করে লিঙ্ক অন্তর্ভুক্ত করুন।
'''এছাড়াও আপনি একে অপরের সাথে কথা বলার নানাবিধ উপায়ের তালিকা তৈরি করতে সাহায্য করতে পারেন।'''
সব দল উইকিতে সক্রিয় না বা উইকিতে জিনিসগুলি নিয়ে আলোচনা করার জন্য একই পদ্ধতি ব্যবহার করে না: বহিঃস্থ সরঞ্জামগুলির মাধ্যমে এটি উইকিতে, সামাজিক নেটওয়ার্কগুলিতে ঘটতে পারে... আমাদের বলুন [[mw:Talk pages consultation 2019/Tools in use|কীভাবে আপনার দল যোগাযোগ করে]]।
আপনি mediawiki.org সাইটে [[mw:Talk pages consultation 2019|সামগ্রিক প্রক্রিয়া]] সম্পর্কে আরো পড়তে পারেন। যদি আপনার কোন প্রশ্ন বা ধারণা থাকে, তবে আপনি আপনার পছন্দের ভাষায় [[mw:Talk:Talk pages consultation 2019|পরামর্শের প্রক্রিয়া সম্পর্কে প্রতিক্রিয়া জানাতে পারেন]]।
ধন্যবাদ! আমরা আপনার সাথে কথা বলতে উন্মুখ হয়ে আছি।
</div> [[user:Trizek (WMF)|Trizek (WMF)]] ১৫:০০, ২১ ফেব্রুয়ারি ২০১৯ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18639017-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Trizek (WMF)@metawiki পাঠিয়েছেন -->
== নতুন নামস্থান ==
মিত্রাক্ষর নামে একটা নতুন নামস্থান (নেমস্পেস) দরকার। ইংরেজিতে Rhymes নামে যেমন রয়েছে। -- [[ব্যবহারকারী:Muhammad|Muhammad]] ([[ব্যবহারকারী আলাপ:Muhammad|আলাপ]]) ০৯:৪৯, ২৪ ফেব্রুয়ারি ২০১৯ (ইউটিসি)
== Read-only mode for up to 30 minutes on 11 April ==
<div class="plainlinks mw-content-ltr" lang="en" dir="ltr"><div class="plainlinks">
<div lang="en" dir="ltr" class="mw-content-ltr">You will '''not be able to edit''' most Wikimedia wikis for up to 30 minutes on '''[https://www.timeanddate.com/worldclock/fixedtime.html?iso=20190411T05 11 April 05:00 UTC]'''. This is because of a hardware problem. You can still read the wikis. You [[phab:T220080|can see which wikis are affected]]. The time you can not edit might be shorter than 30 minutes. /[[User:Johan (WMF)|Johan (WMF)]]</div></div></div> ১০:৫৬, ৮ এপ্রিল ২০১৯ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18979889-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Johan (WMF)@metawiki পাঠিয়েছেন -->
== Wikimedia Foundation Medium-Term Plan feedback request ==
{{int:please-translate}}
<div lang="en" dir="ltr" class="mw-content-ltr">The Wikimedia Foundation has published a [[m:Special:MyLanguage/Wikimedia_Foundation_Medium-term_plan_2019|Medium-Term Plan proposal]] covering the next 3–5 years. We want your feedback! Please leave all comments and questions, in any language, on [[m:Talk:Wikimedia_Foundation_Medium-term_plan_2019|the talk page]], by April 20. {{Int:Feedback-thanks-title}} [[m:User:Quiddity (WMF)|Quiddity (WMF)]] ([[m:User talk:Quiddity (WMF)|talk]]) ১৭:৩৫, ১২ এপ্রিল ২০১৯ (ইউটিসি)</div>
<!-- https://meta.wikimedia.org/w/index.php?title=Distribution_list/Global_message_delivery&oldid=18998727-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:Quiddity (WMF)@metawiki পাঠিয়েছেন -->
p6a11tnbebhxi446rqh23ldyjtxmypz
ব্যবহারকারী আলাপ:Redmin
3
24780
507778
507499
2026-04-13T15:19:44Z
MediaWiki message delivery
2534
/* Tech News: 2026-16 */ নতুন অনুচ্ছেদ
507778
wikitext
text/x-wiki
== I seen your RedminBot Tamil entries Creations ==
I seen your RedminBot Tamil entries creations. That was really nice. I hope you are interested in Tamil entries creations. Thanks for your continuous support! I created a spreadsheet file for Creating Tamil entries in bnwikt [https://docs.google.com/spreadsheets/d/1KDnIFSo8mCgN5Cb4GVxMv_gcgecZxjdFzQso9l-VhWg/edit#gid=0 See this]. This may be useful for your bot Tamil entries. Thanks Again. [[ব্যবহারকারী:Sriveenkat|Sriveenkat]] ([[ব্যবহারকারী আলাপ:Sriveenkat|আলাপ]]) ০৭:৩৩, ৫ অক্টোবর ২০২৩ (ইউটিসি)
:Thank you for this! I will make entries manually as this is too complex for the bot but this really helps. :D [[ব্যবহারকারী:Redmin|Redmin]] ([[ব্যবহারকারী আলাপ:Redmin#top|আলাপ]]) ০৯:৩৭, ৫ অক্টোবর ২০২৩ (ইউটিসি)
::Ok Redmin Thanks. We will do manually Thanks Again. [[ব্যবহারকারী:Sriveenkat|Sriveenkat]] ([[ব্যবহারকারী আলাপ:Sriveenkat|আলাপ]]) ০৯:৫৭, ৫ অক্টোবর ২০২৩ (ইউটিসি)
== পর্যালোচনা ==
[[နာမ်]] এর বিশেষ্যের জন্য মডিউল তৈরি করেছি: [[মডিউল:my-headword]]।
ইংরেজিতে [https://en.m.wiktionary.org/wiki/Module:my-headword Module:my-headword]
কিন্তু কাজ হচ্ছে না কেন? [[ব্যবহারকারী:খালিদ জে. হোসেইন|哈立德]] ১৮:২৩, ২৮ ডিসেম্বর ২০২৩ (ইউটিসি)
:@[[ব্যবহারকারী:খালিদ জে. হোসেইন|খালিদ জে. হোসেইন]], এখনও কি একই অবস্থা? [[ব্যবহারকারী:Redmin|Redmin]] ([[ব্যবহারকারী আলাপ:Redmin#top|আলাপ]]) ১৪:০৪, ৩ জানুয়ারি ২০২৪ (ইউটিসি)
::জ্বি, এখন ঠিক আছে। ধন্যবাদ। [[ব্যবহারকারী:খালিদ জে. হোসেইন|哈立德]] ০১:৩১, ৫ জানুয়ারি ২০২৪ (ইউটিসি)
== <span lang="en" dir="ltr" class="mw-content-ltr">Reminder to vote now to select members of the first U4C</span> ==
<div lang="en" dir="ltr" class="mw-content-ltr">
<section begin="announcement-content" />
:''[[m:Special:MyLanguage/Universal Code of Conduct/Coordinating Committee/Election/2024/Announcement – vote reminder|You can find this message translated into additional languages on Meta-wiki.]] [https://meta.wikimedia.org/w/index.php?title=Special:Translate&group=page-{{urlencode:Universal Code of Conduct/Coordinating Committee/Election/2024/Announcement – vote reminder}}&language=&action=page&filter= {{int:please-translate}}]''
Dear Wikimedian,
You are receiving this message because you previously participated in the UCoC process.
This is a reminder that the voting period for the Universal Code of Conduct Coordinating Committee (U4C) ends on May 9, 2024. Read the information on the [[m:Universal Code of Conduct/Coordinating Committee/Election/2024|voting page on Meta-wiki]] to learn more about voting and voter eligibility.
The Universal Code of Conduct Coordinating Committee (U4C) is a global group dedicated to providing an equitable and consistent implementation of the UCoC. Community members were invited to submit their applications for the U4C. For more information and the responsibilities of the U4C, please [[m:Universal Code of Conduct/Coordinating Committee/Charter|review the U4C Charter]].
Please share this message with members of your community so they can participate as well.
On behalf of the UCoC project team,<section end="announcement-content" />
</div>
[[m:User:RamzyM (WMF)|RamzyM (WMF)]] ২৩:১৬, ২ মে ২০২৪ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Universal_Code_of_Conduct/Coordinating_Committee/Election/2024/Previous_voters_list&oldid=26721206-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:RamzyM (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-09</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W09"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/09|Translations]] are available.
'''Weekly highlight'''
* [[mw:Special:MyLanguage/Edit check/Reference Check|Reference Check]] has been deployed to English Wikipedia, completing its rollout across all Wikipedias. The feature prompts newcomers to add a citation before publishing new content, helping reduce common citation-related reverts and improve verifiability. In A/B testing, the impact was substantial: newcomers shown Reference Check were approximately 2.2 times more likely to include a reference on desktop and about 17.5 times more likely on mobile web. [https://analytics.wikimedia.org/published/reports/editing/reference_check_ab_test_report_final_2025.html]
'''Updates for editors'''
* The [[mw:Special:MyLanguage/Extension:InterwikiSorting|InterwikiSorting extension]], which allowed for the [[m:Special:MyLanguage/Interwiki sorting order|sorting of interwiki links]], has been undeployed from Wikipedia. As a result, editors who had enabled interwiki link sorting in non-compact mode (full list format) will now see links reordered. The links moving forward will be listed in the alphabetical order of language code. [https://phabricator.wikimedia.org/T253764]
* Later this week, people who are editing a page-section using the mobile visual editor, will notice a new "Edit full page" button. When tapped, you will be able to edit the entire article. This helps when the change you want to make is outside the section you initially opened. [https://phabricator.wikimedia.org/T387175][https://phabricator.wikimedia.org/T409112]
* [[mw:Special:MyLanguage/Readers/Reader Experience|The Reader Experience team]] is inviting editors to assess whether dark mode should still be considered "beta" on their wiki, based on their experience of how well it functions on desktop and mobile. If the feature is deemed mature, editors can update the interface messages in <code dir=ltr>MediaWiki:skin-theme-description</code> and <code dir=ltr>MediaWiki:Vector-night-mode-beta-tag</code> to indicate that dark mode is ready and no longer considered beta.
* The improved [[mw:Wikimedia_Apps/Team/iOS/Activity_Tab|Activity tab]] which displays user-insights is now available to all users of the Wikipedia iOS app (version 7.9.0 and later). Following earlier A/B testing that showed higher account creation among users with access to the feature, it has been rolled out to 100% of users along with some updates. The Activity tab now shows your edited articles in the timeline, offers editing impact insights like contribution counts and article view trends, and customization options to improve in-app experience for users.
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:21}} community-submitted {{PLURAL:21|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, a bug that prevented [[mw:Special:MyLanguage/Extension:DiscussionTools|DiscussionTools]] from working on mobile has now been fixed, restoring full functionality. [https://phabricator.wikimedia.org/T415303]
'''Updates for technical contributors'''
* The [[m:Special:GlobalWatchlist|Global Watchlist]] lets you view your watchlists from multiple wikis on one page. The [[mw:Special:MyLanguage/Extension:GlobalWatchlist|extension]] that makes this possible continues to improve. The latest upgrade is the inclusion of a [[mw:Extension:GlobalWatchlist#hook|new hook]], <code dir=ltr>ext.globalwatchlist.rebuild</code>, which fires after each watchlist rebuild. This allows you to run gadgets and user scripts for the Special page. [https://phabricator.wikimedia.org/T275159]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.17|MediaWiki]]
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/09|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W09"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৯:০৪, ২৩ ফেব্রুয়ারি ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30119102-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-10</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W10"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/10|Translations]] are available.
'''Weekly highlight'''
* Wikipedia 25 [[m:Special:MyLanguage/Wikipedia 25/Easter egg experiments|Birthday mode]] is now live on Betawi, Breton, Chinese, Czech, Dutch, English, French, Gorontalo, Indonesian, Italian, Luxembourgish, Madurese, Sicilian, Spanish, Thai, and Vietnamese Wikipedias! This limited-time campaign feature celebrates 25 years of Wikipedia with a birthday mascot, Baby Globe. When turned on, Baby Globe is shown on [[m:Special:MyLanguage/Wikipedia 25/Easter egg experiments/article configuration|~2,500 articles]], waiting to be discovered by readers. Communities can choose to turn Birthday mode on by getting consensus from their community and asking an admin to enable the feature and customize it via [[m:Special:MyLanguage/Wikipedia 25/Easter egg experiments#Community Configuration Demo|community configuration]] on the local wiki.
'''Updates for editors'''
* [[:m:Special:MyLanguage/WMDE Technical Wishes/Sub-referencing|Sub-referencing]], a new feature to re-use references with different details has been released to Swedish Wikipedia, Polish Wikipedia and [[:phab:T418209|a couple of other wikis]]. You can [[:m:Special:MyLanguage/WMDE Technical Wishes/Sub-referencing#test|try the feature]] on these projects or on testwiki and [https://en.wikipedia.beta.wmcloud.org/wiki/Sub-referencing betawiki]. Learnings from the first pilot wiki German Wikipedia have been [[:m:Special:MyLanguage/WMDE Technical Wishes/Sub-referencing/Learnings|published in a report]]. Reach out to the Wikimedia Deutschland team if you are [[:m:Talk:WMDE Technical Wishes/Sub-referencing#Pilot wikis|interested in becoming a pilot wiki]].
* [[mw:Special:MyLanguage/Help:Edit check#Paste check|Paste Check]] will become available at all Wikipedias this week. The feature prompts newcomers who are pasting text they are not likely to have written into VisualEditor to consider whether doing so risks a copyright violation. Paste Check [[mw:Special:MyLanguage/Edit check/Tags|tags]] all edits where it is shown for potential review. Local administrators can configure various aspects of the feature via [[{{#special:EditChecks}}]]. [[mw:Special:MyLanguage/Edit check/Paste Check#A/B Experiment|Research]] across 22 wikis found that Paste Check resulted in an 18% decrease in relative reverted-edits compared to the control group. Translators can [https://translatewiki.net/w/i.php?title=Special%3ATranslate&group=ext-visualeditor-ve-mw-editcheck&filter=&optional=1&action=translate help to localize] this and related features.
* The [[mw:Special:MyLanguage/Readers/Reader Experience|Reader Experience team]] will be standardizing the user menu in the top right for all mobile users so that it is closer to the desktop experience. Currently this user menu is only visible to users with Advanced Mobile Controls (AMC) turned on. The only change is that a couple buttons previously in the left-side menu will move to the top right for users who do not have AMC turned on. This change is expected to go out March 9 and seeks to improve the user interface. [https://phabricator.wikimedia.org/T413912]
* Starting in the week of March 2, the emails sent out when an email address was added, removed, or changed for an account will switch to a substantially nicer and clearer HTML email from the prior plaintext one. [https://phabricator.wikimedia.org/T410807]
* Notifications are currently limited to 2,000 historic entries per user, and extend back to 2013 when the feature was released. This is going to be changed to only store Notifications from the last 5 years, but up to 10,000 of them. This will help with long-term infrastructure health and help to prevent more recent notifications from disappearing too soon. [https://phabricator.wikimedia.org/T383948]
* The [[m:Special:GlobalWatchlist|Global Watchlist]] which lets you view your watchlists from multiple wikis on a single page continues to see improvements. The latest update improves label usage experience. The [[mw:Special:MyLanguage/Extension:GlobalWatchlist|extension]] now allows activating the [[mw:Special:MyLanguage/Manual:Language#Fallback languages|language fallback system]] for Wikidata items without labels in the viewed language, and showing those labels in the user’s preferred Wikidata language if no <code dir=ltr>uselang=</code> URL parameter is provided. [https://phabricator.wikimedia.org/T373686][https://phabricator.wikimedia.org/T416111]
* The Wikipedia Android team has started a beta test of [[mw:Special:MyLanguage/Readers/Information Retrieval/Phase 1|hybrid search]] on Greek Wikipedia. Hybrid search capabilities can handle both semantic and keyword queries enabling readers to find what they’re looking for directly on Wikipedia more easily.
* For security reasons, members of certain user groups are [[m:Special:MyLanguage/Mandatory two-factor authentication for users with some extended rights|required to have two-factor authentication]] (2FA) enabled. Currently, 2FA is required to use the group, but not to be a member of it. Given that this model still has some vulnerabilities, the situation will [[phab:T418580|gradually change in March]]. Members of these groups will be unable to disable the last 2FA method on their account, and it will be impossible to add users without 2FA to these groups. Users will still be able to add new authentication methods or remove them, as long as at least one method is continuously enabled. In the second half of March, users without 2FA will be removed from these groups. This applies to: CentralNotice administrators, checkusers, interface administrators, suppressors, Wikidata staff, Wikifunctions staff, WMF Office IT and WMF Trust & Safety. Nothing will change for other users. See the linked task for deployment schedule. [https://phabricator.wikimedia.org/T418580]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:27}} community-submitted {{PLURAL:27|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, the issue preventing users from creating an instance in [https://www.wikibase.cloud/ Wikibase.cloud] has now been fixed. [https://phabricator.wikimedia.org/T416807]
'''Updates for technical contributors'''
* To help ensure [[mw:Special:MyLanguage/MediaWiki Product Insights/Responsible Reuse|fair use of infrastructure]], over the next month the Wikimedia Foundation will implement global API rate limits across our APIs. In early March, stricter limits will be applied to unidentified requests from outside Toolforge/WMCS and API requests that are made from web browsers. In April, higher limits will be applied to identified traffic. These limits are intentionally set as high as possible to minimise impact on the community. Bots running in Toolforge/WMCS or with the bot user right on any wiki should not be affected for now. However, all developers are advised to follow updated best practices. For more information, see [[mw:Special:MyLanguage/Wikimedia APIs/Rate limits|Wikimedia APIs/Rate limits]].
* The Wikidata Query Service Linked Data Fragment (LDF) endpoint will be decommissioned in February. This endpoint served limited traffic, which was successfully migrated to other data access methods that were better suited to support existing use cases. The hardware used to support the LDF endpoint will be reallocated to support the ongoing backend migration efforts. [https://phabricator.wikimedia.org/T415696]
* The new Parsoid parser [[mw:Special:MyLanguage/Parsoid/Parser Unification/Updates|continues to be deployed to additional wikis]], improving platform sustainability and making it easier to introduce new reading and editing features. Parsoid is now the default parser on 488 WMF wikis (268 Wikipedias), now covering more than 10% of all Wikipedia page views.
* The process and criteria for [[Special:MyLanguage/Wikimedia Enterprise#Access|requesting exceptional access]] to the high volume feed of the ''Wikimedia Enterprise'' APIs (at no cost for mission-aligned use cases), [[m:Talk:Wikimedia Enterprise#Exceptional access criteria|have now been published]]. This is to provide more thorough and clearer documentation for users.
* [https://techblog.wikimedia.org/ Tech Blog], the blog dedicated to the Wikimedia technical community [https://techblog.wikimedia.org/2026/02/24/a-tech-blog-diff/ will be migrating] to [[diffblog:|Diff]], the community news and event blog. The migration should be complete in April 2026, after which new posts will be accepted for publishing. Readers will be able to access posts – old and new – on the landing page at https://diff.wikimedia.org/techblog.
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.18|MediaWiki]]
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/10|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W10"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৭:৫২, ২ মার্চ ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30137798-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-11</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W11"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/11|Translations]] are available.
'''Weekly highlight'''
* [[m:Special:MyLanguage/Tech/Server switch|All wikis will be read-only]] for a few minutes on Wednesday, 25 March 2026 at [https://zonestamp.toolforge.org/1774450800 15:00 UTC]. This is for the datacenter server switchover backup tests, [[wikitech:Deployments/Yearly calendar|which happen twice a year]]. During the switchover, all Wikimedia website traffic is shifted from one primary data center to the backup data center to test availability and prevent service disruption even in emergencies.
* Last week, all wikis had 2 hours of read-only time, and extended unavailability for user-scripts and gadgets. This was due to a security incident which has since been resolved. Work is ongoing to prevent re-occurrences. For current information please see the [[m:Steward's noticeboard#Statement on Meta about today's user script security incident|post on the Stewards' noticeboard]] ([[m:Special:MyLanguage/Wikimedia Foundation/Product and Technology/Product Safety and Integrity/March 2026 User Script Incident|translations]]).
'''Updates for editors'''
* Users facing multiple blocks on mobile will now see the reasons for each block separately, instead of a generic message. This helps them understand why they are blocked and what steps they can take to resolve the issue. For example, users affected for using common VPNs (such as [[Special:MyLanguage/Apple iCloud Private Relay|iCloud Private Relay]]) will receive clearer guidance on what they need to do to start editing again. [https://phabricator.wikimedia.org/T357118]
* Later this week, [[mw:Special:MyLanguage/VisualEditor/Suggestion Mode|Suggestion Mode]] will become available as a beta feature within the visual editor at all Wikipedias. This feature proactively suggests various types of actions that people can consider taking to improve Wikipedia articles, and learn about related guidelines. The feature is locally configurable, and can also be locally expanded with custom Suggestions. Current settings can be seen at [[Special:EditChecks]] and there are [[mw:Special:MyLanguage/Help:Suggestion mode#For administrators %E2%80%93 local customization|instructions for how administrators can customize]] the links to point to local guidelines. The feature is connected to [[mw:Special:MyLanguage/Help:Edit check|Edit check]] which suggests improvements while someone is writing new content. In the future, the Editing team plans to evaluate the feature's impact with newcomers through a controlled experiment. [https://phabricator.wikimedia.org/T404600]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:23}} community-submitted {{PLURAL:23|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, the issue where the cursor became misaligned during the use of CodeMirror’s syntax highlighting, which makes wikitext and code easier to read, has now been fixed. This problem specifically affected users who defined a font rule in a custom stylesheet while creating a new topic with DiscussionTools. [https://phabricator.wikimedia.org/T418793]
'''Updates for technical contributors'''
* API rate limiting update: To help ensure [[mw:Special:MyLanguage/MediaWiki Product Insights/Responsible Reuse|fair use of infrastructure]], global API rate limits will be applied this week to requests without a compliant User-Agent that originate from outside Toolforge/WMCS and to unauthenticated requests made from web browsers. Higher limits will be applied to identified traffic in April. Bots running in Toolforge/WMCS or with the bot user right on any wiki should not be affected for now. However, all developers are advised to follow updated best practices. For more information, see [[mw:Special:MyLanguage/Wikimedia APIs/Rate limits|Wikimedia APIs/Rate limits]].
* The new GraphQL API has been released. The API was developed as a flexible alternative to select features of the Wikidata Query Service (WDQS), to improve developer experience and foster adaptability, and efficient data access. Try it out and [[d:Wikidata:Wikibase GraphQL#Feedback and development|give feedback]]. You can also [https://greatquestion.co/wikimediadeutschland/GraphQLAPI/apply sign up for usability tests].
* The [[m:Special:MyLanguage/Product and Technology Advisory Council/Unsupported Tools Working Group|PTAC Unsupported Tools Working Group]] continued improvements to [[commons:Special:MyLanguage/Commons:Video2commons#|Video2Commons]] in February, with fixes addressing authentication errors, large-file handling, task queue visibility, and clearer upload behavior. Work is still ongoing in some areas, including changes related to deprecated server-side uploads. Read [[m:Special:MyLanguage/Product and Technology Advisory Council/Unsupported Tools Working Group#February 2026|this update]] to learn more.
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.19|MediaWiki]]
'''In depth'''
* The Article Guidance team invites experienced Wikipedia editors from selected [[mw:Special:MyLanguage/Article guidance/Pilot wikis and collaborators#Collaborators|pilot wikis]] and interested contributors from other Wikipedias to fill out this questionnaire which is available in [https://docs.google.com/forms/d/e/1FAIpQLSfmLeVWnxmsCbPoI_UF2jyRcn73WRGWCVPHzerXb4Cz97X_Ag/viewform English], [https://docs.google.com/forms/d/e/1FAIpQLSd6rzr4XXQw8r4024fE3geTPFe13M_6w7Mitj-YJi0sOlWTAw/viewform?usp=header Arabic], [https://docs.google.com/forms/d/e/1FAIpQLSdok3-RfB18lcugYTUMGkpwmqG_8p760Wv4dCXitOXOszjUDw/viewform?usp=header Bengali], [https://docs.google.com/forms/d/e/1FAIpQLSfjTfYp4jEo0akA4B1e-Nfg3QZPCudUjhJzHzzDi6AHyAaMGA/viewform?usp=header Japanese], [https://docs.google.com/forms/d/e/1FAIpQLScteVoI29Aue4xc72dekk-6RYtvmMgQxzMI900UOawrFrSTWg/viewform?usp=header Portuguese], [https://docs.google.com/forms/d/e/1FAIpQLSetdxnYwL3ub2vqA7awCg5hJZPMIYcDPaiTe12rY9h0GYnVlw/viewform?usp=header Persian], and [https://docs.google.com/forms/d/e/1FAIpQLScNvfJF-Ot-4pzA4qAN771_0QDJ4Li19YcUsaTgSKW8Nc7U_Q/viewform?usp=header Turkish]. Your answers will help the team customize guidance for less experienced editors and help them learn community policies and practices while creating an article. Learn more [[mw:Special:MyLanguage/Article guidance|on the project page]].
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/11|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W11"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৮:৫৩, ৯ মার্চ ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30213008-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-12</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W12"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/12|Translations]] are available.
'''Updates for editors'''
* The [[mw:Special:MyLanguage/Help:Extension:CodeMirror|{{int:codemirror-beta-feature-title}}]] beta feature, also known as [[mw:Special:MyLanguage/Extension:CodeMirror|CodeMirror 6]], has been used for wikitext syntax highlighting since November 2024. It will be promoted out of beta by May 2026 in order to bring improvements and new [[mw:Special:MyLanguage/Help:Extension:CodeMirror#Features|features]] to all editors who use the standard syntax highlighter. If you have any questions or concerns about promoting the feature out of beta, [[mw:Special:MyLanguage/Help talk:Extension:CodeMirror|please share]]. [https://phabricator.wikimedia.org/T259059]
* Some changes to local user groups are performed by stewards on Meta-Wiki and logged there only. Now, interwiki rights changes will be logged both on Meta-Wiki and the wiki of the target user to make it easier to access a full record of a user's rights changes on a local wiki. Past log entries for such changes will be backfilled in the coming weeks. [https://phabricator.wikimedia.org/T6055]
* On wikis using [[m:Special:MyLanguage/Flagged Revisions|Flagged Revisions]], the number of pending changes shown on [[{{#Special:PendingChanges}}]] previously counted pages which were no longer pending review, because they had been removed from the system without being reviewed, e.g. due to being deleted, moved to a different namespace, or due to wiki configuration changes. The count will be correct now. On some wikis the number shown will be much smaller than before. There should be no change to the list of pages itself. [https://phabricator.wikimedia.org/T413016]
* Wikifunctions composition language has been rewritten, resulting in a new version of the language. This change aims to increase service stability by reducing the orchestrator's memory consumption. This rewrite also enables substantial latency reduction, code simplification, and better abstractions, which will open the door to later feature additions. Read more about [[f:Special:MyLanguage/Wikifunctions:Status updates/2026-03-11|the changes]].
* Users can now sort search results alphabetically by page title. The update gives an additional option for finding pages more easily and quickly. Previously, results could be sorted by Edit date, Creation date, or Relevance. To use the new option, open 'Advanced Search' on the search results page and select 'Alphabetically' under 'Sorting Order'. [https://phabricator.wikimedia.org/T403775]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:28}} community-submitted {{PLURAL:28|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, the bug that prevented UploadWizard on Wikimedia Commons from importing files from Flickr has now been fixed. [https://phabricator.wikimedia.org/T419263]
'''Updates for technical contributors'''
* A new special page, [[{{#special:LintTemplateErrors}}]], has been created to list transcluded pages that are flagged as containing lint errors to help users discover them easily. The list is sorted by the number of transclusions with errors. For example: [[{{#special:LintTemplateErrors}}/night-mode-unaware-background-color]]. [https://phabricator.wikimedia.org/T170874]
* Users of the [[mw:Special:MyLanguage/Help:Extension:CodeMirror|{{int:codemirror-beta-feature-title}}]] beta feature have been using [[mw:Special:MyLanguage/Extension:CodeMirror|CodeMirror]] instead of [[mw:Special:MyLanguage/Extension:CodeEditor|CodeEditor]] for syntax highlighting when editing JavaScript, CSS, JSON, Vue and Lua content pages, for some time now. Along with promoting CodeMirror 6 out of beta, the plan is to replace CodeEditor as the standard editor for these content models by May 2026. [[mw:Special:MyLanguage/Help talk:Extension:CodeMirror|Feedback or concerns are welcome]]. [https://phabricator.wikimedia.org/T419332]
* The [[mw:Special:MyLanguage/Extension:CodeMirror|CodeMirror]] JavaScript modules will soon be upgraded to CodeMirror 6. Leading up to the upgrade, loading the <code dir=ltr>ext.CodeMirror</code> or <code dir=ltr>ext.CodeMirror.lib</code> modules from gadgets and user scripts was deprecated in July 2025. The use of the <code dir=ltr>ext.CodeMirror.switch</code> hook was also deprecated in March 2025. Contributors can now make their scripts or gadgets compatible with CodeMirror 6. See the [[mw:Special:MyLanguage/Extension:CodeMirror#Gadgets and user scripts|migration guide]] for more information. [https://phabricator.wikimedia.org/T373720]
* The MediaWiki Interfaces team is expanding coverage of REST API module definitions to include [[mw:Special:MyLanguage/API:REST API/Extensions|extension APIs]]. REST API modules are groups of related endpoints that can be independently managed and versioned. Modules now exist for [https://phabricator.wikimedia.org/T414470 GrowthExperiments] and [https://phabricator.wikimedia.org/T419053 Wikifunctions] APIs. As we migrate extension APIs to this structure, documentation will move out of the main MediaWiki OpenAPI spec and REST Sandbox view, and will instead be accessible via module-specific options in the dropdown on the [https://test.wikipedia.org/wiki/Special:RestSandbox REST Sandbox] (i.e., [[{{#Special:RestSandbox}}]], available on all wiki projects).
* The [[mw:Special:MyLanguage/Extension:Scribunto|Scribunto]] extension provides different pieces of information about the wiki where the module is being used via the [[mw:Special:MyLanguage/Extension:Scribunto/Lua reference manual|mw.site]] library. Starting last week, the library also provides a [[mw:Special:MyLanguage/Extension:Scribunto/Lua reference manual#mw.site.wikiId|way]] of accessing the [[mw:Special:MyLanguage/Manual:Wiki ID|wiki ID]] that can be used to facilitate cross-wiki module maintenance. [https://phabricator.wikimedia.org/T146616]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.20|MediaWiki]]
'''In depth'''
* The [[m:Special:MyLanguage/Coolest Tool Award|2026 Coolest Tool Award]] celebrating outstanding community tools, is now open for nominations! Nominate your favorite tool using the [https://wikimediafoundation.limesurvey.net/435684?lang=en nomination survey] form by 23 March 2026. For more information on privacy and data handling, please see the [[foundation:Special:MyLanguage/Legal:Coolest_Tool_Award_2026_Survey_Privacy_Statement|survey privacy statement]].
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/12|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W12"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৯:৩৬, ১৬ মার্চ ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30260505-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-13</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W13"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/13|Translations]] are available.
'''Weekly highlight'''
* Wikimedia site users can now log in without a password using passkeys. This is a secure method supported by fingerprint, facial recognition, or PIN. With this change, all users who opt for passwordless login will find it easier, faster, and more secure to log in to their accounts using any device. The new passkey login option currently appears as an autofill suggestion in the username field. An additional [[phab:T417120|"Log in with passkey" button]] will soon be available for users who have already registered a passkey. This update will improve security and user experience. The [[c:File:Passwordless_login_screencast.webm|screen recording]] demonstrates the passwordless login process step by step.
* [[m:Special:MyLanguage/Tech/Server switch|All wikis will be read-only]] for a few minutes on Wednesday, 25 March 2026 at [https://zonestamp.toolforge.org/1774450800 15:00 UTC]. This is for the datacenter server switchover backup tests, [[wikitech:Deployments/Yearly calendar|which happen twice a year]]. During the switchover, all Wikimedia website traffic is shifted from one primary data center to the backup data center to test availability and prevent service disruption even in emergencies.
'''Updates for editors'''
* Wikimedia site users can now export their notifications older than 5 years using a [[toolforge:echo-chamber|new Toolforge tool]]. This will ensure that users retain their important notifications and avoid them being lost based on the planned change to delete notifications older than 5 years, as previously announced. [https://phabricator.wikimedia.org/T383948]
* Wikipedia editors in Indonesian, Thai, Turkish, and Simple English now have access to Special:PersonalDashboard. This is an [[mw:Special:MyLanguage/Moderator Tools/Dashboard|early version of an experience]] that introduces newer editors to patrolling workflows, making it easier for them to move from making edits to participating in more advanced moderation work on their project. [https://phabricator.wikimedia.org/T402647]
* The [[Special:Block]] now has two minor interface changes. Administrators can now easily perform indefinite blocks through a dedicated radio button in the expiry section. Also, choosing an indefinite expiry provides a different set of common reasons to select from, which can be changed at: [[MediaWiki:Ipbreason-indef-dropdown]]. [https://phabricator.wikimedia.org/T401823]
* Mobile editors [[mw:Special:MyLanguage/Contributors/Account Creation Experiments#Logged-out|at several wikis]] can now see an improved logged-out edit warning, thanks to the recent updates from the Growth team. These changes released last week are part of ongoing efforts and tests to enhance [[mw:Special:MyLanguage/Contributors/Account Creation Experiments|account creation experience on mobile]] and then increase participation. [https://phabricator.wikimedia.org/T408484]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:36}} community-submitted {{PLURAL:36|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, the bug that prevented mobile web users from seeing the block information when affected by multiple blocks has been fixed. They can now see messages of all the blocks currently affecting them when they access Wikipedia.
'''Updates for technical contributors'''
* Images built using Toolforge will soon get the upgraded buildpacks version, bringing support for newer language versions and other upstream improvements and fixes. If you use Toolforge Build Service, review the recent [https://lists.wikimedia.org/hyperkitty/list/cloud-announce@lists.wikimedia.org/thread/EMYTA32EV2V5SQ2JIEOD2CL66YFIZEKV/ cloud-announce email] and update your build configuration as necessary to ensure your tools are compatible. [https://wikitech.wikimedia.org/w/index.php?title=Help:Toolforge/Building_container_images&oldid=2392097#Buildpack_environment_upgrade_process][https://phabricator.wikimedia.org/T380127]
* The [https://api.wikimedia.org/wiki/Main_Page API Portal] documentation wiki will shut down in June 2026. API keys created on the API Portal will continue to work normally. api.wikimedia.org endpoints will be deprecated gradually starting in July 2026. Documentation on the API Portal is moving to [[mw:Wikimedia APIs|mediawiki.org]]. Learn more on the [[wikitech:API Portal/Deprecation|project page]].
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.21|MediaWiki]]
'''In depth'''
* [[m:Special:MyLanguage/WMDE Technical Wishes|WMDE Technical Wishes]] is considering improvements to [[m:WMDE Technical Wishes/References/VisualEditor automatic reference names|automatically generated reference names in VisualEditor]]. Please check out the [[m:WMDE Technical Wishes/References/VisualEditor automatic reference names#Proposed solutions|proposed solutions]] and participate in the [[m:Talk:WMDE Technical Wishes/References/VisualEditor automatic reference names#Request for comment|request for comment]].
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/13|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W13"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৬:৫১, ২৩ মার্চ ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30268305-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:UOzurumba (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-14</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W14"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/14|Translations]] are available.
'''Weekly highlight'''
* The Beta version of [[abstract:|Abstract Wikipedia]], a new Wikimedia project which is language-independent, was launched last week. The project allows communities to build Wikipedia articles in their native language, which can be readily accessed by other users in their own languages. The wiki is powered by instructions from Wikifunctions and also based on structured content from Wikidata. [[:f:Special:MyLanguage/Wikifunctions:Status updates/2026-03-26|Read more]].
'''Updates for editors'''
* The Growth team is running an A/B test to evaluate a clearer, more user-friendly message that promotes account creation on wikis. Currently when logged-out mobile users begin editing, they see a jarring warning message that can feel abrupt and discouraging. This also presents temporary account editing as the default rather than encouraging account creation. The test is running on ten Wikipedias, including Arabic, French, Spanish and German. [[mw:Special:MyLanguage/Contributors/Account Creation Experiments#2. Improve logged-out warning message (T415160)|Read more]].
* The Wikimedia Apps team is inviting feedback on [[mw:Special:MyLanguage/Wikimedia Apps/Team/Future of Editing on the Mobile Apps|how editing should work on the Wikipedia mobile apps]]. The discussion focuses on improving how users access editing tools when they tap "Edit". This is part of a broader effort to convert readers who develop an interest in editing, to access a more user-friendly pathway to start contributing.
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:45}} community-submitted {{PLURAL:45|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, an issue where citation fetching from the large newspaper archive [https://www.newspapers.com Newspapers.com] was no longer working, due to a block in [[mw:Special:MyLanguage/Citoid|Citoid]] requests, has now been fixed. [https://phabricator.wikimedia.org/T419903]
'''Updates for technical contributors'''
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.22|MediaWiki]]
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/14|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W14"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৯:২৬, ৩০ মার্চ ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30329462-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-15</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W15"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/15|Translations]] are available.
'''Updates for editors'''
* The [[mw:Special:MyLanguage/Help:Extension:CampaignEvents|CampaignEvents extension]] now includes a new group goal-setting feature, enabling organizers to set and track event goals such as the number of articles created and participating contributors in real time. Similarly, participants can work toward shared targets and see their collective impact as the event unfolds. The feature is now available on all Wikimedia wikis. Learn more in [[mw:Special:MyLanguage/Help:Extension:CampaignEvents/Registration/Collaborative contributions#Goal setting|the documentation]].
* [[File:Maki-gift-15.svg|12px|link=|class=skin-invert|Wishlist item]] The new [[mw:Special:MyLanguage/Help:Watchlist labels|watchlist labels]] feature (announced in [[m:Special:MyLanguage/Tech/News/2026/07|Tech News 2026-07]]) is now available via VisualEditor, the source editor, and the 'watchstar' (or watch link, for skins that don't have a star icon). Previously it was only possible to assign labels via [[Special:EditWatchlist|EditWatchlist]]. In all three places it is a new field following the expiry field.
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:23}} community-submitted {{PLURAL:23|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, the issue where talk pages on mobile with Parsoid are unusable after empty section headers, has now been fixed. [https://phabricator.wikimedia.org/T419171]
'''Updates for technical contributors'''
* The [[m:Special:MyLanguage/WMDE Technical Wishes/Sub-referencing|sub-referencing feature]], which lets editors add details to an existing reference without duplicating it, will be gradually rolled out to [[phab:T414094|more wikis]] later this year. Wikis using the [[mw:Special:MyLanguage/Reference Tooltips|Reference Tooltips]] gadget are encouraged to update their version (typically at [[m:MediaWiki:Gadget-ReferenceTooltips.js|MediaWiki:Gadget-ReferenceTooltips.js]] as shown [https://en.wikipedia.org/w/index.php?diff=1344408362 here]) to ensure compatibility. Other reference-related gadgets may also be affected. [https://phabricator.wikimedia.org/T416304]
* All Wikinews editions will be closed and switched to read-only mode on 4 May 2026. Content will remain accessible, but no new edits or articles can be added. This closure was approved by the Board of Trustees of the Wikimedia Foundation following extended discussions. [[m:Wikimedia Foundation Board noticeboard#Board of Trustees Approves Closure of Wikinews|Read more]].
* The [[:mw:Special:MyLanguage/API:Action API|Action API]] has had several formats for requested output. One of them, <bdi lang="zxx" dir="ltr"><code><nowiki>format=php</nowiki></code></bdi>, is being removed soon. Please ensure your scripts or bots use the [[mw:Special:MyLanguage/API:Data formats#Output|JSON format]]. This removal should affect very few scripts and bots. [https://phabricator.wikimedia.org/T118538]
* The [[Special:NamespaceInfo|Special:NamespaceInfo]] page now includes namespace aliases. For example "WP" for the "Project" ("Wikipedia") namespace on the German Wikipedia. [https://phabricator.wikimedia.org/T381455]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.23|MediaWiki]]
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/15|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W15"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৬:১৯, ৬ এপ্রিল ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30362761-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
== <span lang="en" dir="ltr">Tech News: 2026-16</span> ==
<div lang="en" dir="ltr">
<section begin="technews-2026-W16"/><div class="plainlinks">
Latest '''[[m:Special:MyLanguage/Tech/News|tech news]]''' from the Wikimedia technical community. Please tell other users about these changes. Not all changes will affect you. [[m:Special:MyLanguage/Tech/News/2026/16|Translations]] are available.
'''Weekly highlight'''
* Experienced editors are invited to [https://b24e11a4f1.catalyst.wmcloud.org/wiki/Main_Page test] the [[mw:Special:MyLanguage/Article guidance|Article guidance]] feature, designed to help less-experienced editors create well-structured, policy-compliant Wikipedia articles. Testing instructions are [[mw:Special:MyLanguage/Article guidance/Test feature guide|available]]. Also, after reviewing [https://b24e11a4f1.catalyst.wmcloud.org/wiki/Category:Pages_using_article_guidance the outlines], please provide feedback on the [[mw:Talk:Article guidance|project talk page]]. Based on your input, the feature will be refined and transferred to the pilot Wikipedias to translate and adapt. Check out [[c:File:Article Guidance workflow demo - April 2026.webm|the video]] explaining the feature.
'''Updates for editors'''
* On most wikis, all autoconfirmed users can now use [[Special:ChangeContentModel|Special:ChangeContentModel]] page to [[mw:Special:MyLanguage/Help:ChangeContentModel|create new pages with custom content models]], such as mass message lists, making custom page formats more accessible. Check [[Special:ListGroupRights|Special:ListGroupRights]] for the status of your wiki. [https://phabricator.wikimedia.org/T248294]
* The Growth team has launched an [[mw:Special:MyLanguage/Contributors/Account_Creation_Experiments|account creation experiment]] to evaluate whether adding an account creation button to the mobile web header increases new account registrations and encourages more mobile users to contribute to the wikis. The experiment is currently live on Hindi, Indonesian, Bengali, Thai, and Hebrew Wikipedia, and targets 10% of logged-out mobile web users.
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] View all {{formatnum:30}} community-submitted {{PLURAL:30|task|tasks}} that were [[m:Special:MyLanguage/Tech/News/Recently resolved community tasks|resolved last week]]. For example, an issue where VisualEditor could get stuck loading on Windows devices with animations turned off, has now been fixed. [https://phabricator.wikimedia.org/T382856]
'''Updates for technical contributors'''
* Starting later this week, {{int:group-abusefilter}} who have the [[mw:Special:MyLanguage/Help:Extension:CodeMirror|{{int:codemirror-beta-feature-title}}]] beta feature enabled will have [[mw:Special:MyLanguage/Extension:CodeMirror|CodeMirror]] instead of [[mw:Special:MyLanguage/Extension:CodeEditor|CodeEditor]] as the editor at [[Special:AbuseFilter|Special:AbuseFilter]]. This is part of the broader effort to make the user experience more consistent across all editors. [https://phabricator.wikimedia.org/T399673][https://phabricator.wikimedia.org/T419332]
* Tools and bots that access the [[mw:Special:MyLanguage/Notifications/API|Notifications API]] (<bdi lang="zxx" dir="ltr"><code><nowiki>action=query&meta=notifications</nowiki></code></bdi>) will need to update their OAuth or BotPassword grants to also include access to private notifications. [https://phabricator.wikimedia.org/T421991]
* Due to a library upgrade, listings on category pages may be displayed out of order starting on Monday, 20th April. A migration script will be run to correct this, and will take hours to days depending on the size of the wiki (up to a week for English Wikipedia). [https://phabricator.wikimedia.org/T422544]
* [[File:Reload icon with two arrows.svg|12px|link=|class=skin-invert|Recurrent item]] Detailed code updates later this week: [[mw:MediaWiki 1.46/wmf.24|MediaWiki]]
'''''[[m:Special:MyLanguage/Tech/News|Tech news]]''' prepared by [[m:Special:MyLanguage/Tech/News/Writers|Tech News writers]] and posted by [[m:Special:MyLanguage/User:MediaWiki message delivery|bot]] • [[m:Special:MyLanguage/Tech/News#contribute|Contribute]] • [[m:Special:MyLanguage/Tech/News/2026/16|Translate]] • [[m:Tech|Get help]] • [[m:Talk:Tech/News|Give feedback]] • [[m:Global message delivery/Targets/Tech ambassadors|Subscribe or unsubscribe]].''
</div><section end="technews-2026-W16"/>
</div>
<bdi lang="en" dir="ltr">[[User:MediaWiki message delivery|MediaWiki message delivery]]</bdi> ১৫:১৯, ১৩ এপ্রিল ২০২৬ (ইউটিসি)
<!-- https://meta.wikimedia.org/w/index.php?title=Global_message_delivery/Targets/Tech_ambassadors&oldid=30380527-এর তালিকা ব্যবহার করে বার্তাটি ব্যবহারকারী:STei (WMF)@metawiki পাঠিয়েছেন -->
mrqggv8fbajdnbkk945tpw40l16pzm4
মডিউল:links/data
828
25308
507789
323747
2026-04-14T06:45:58Z
Redmin
6857
[[en:Module:links/data|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507789
Scribunto
text/plain
-- Data for [[Module:links]]: phonetic-extraction modules, ignored link
-- prefixes, the unsupported-titles escaping scheme, manually listed
-- unsupported titles, and the subpage layout used for "mammoth" pages.
local data = {}
-- NOTE: an unused `local unpack = unpack or table.unpack` compatibility
-- shim was removed; nothing in this module uses unpack.
local u = require("Module:string utilities").char
-- Language codes whose phonetic extraction is delegated to a per-language module.
data.phonetic_extraction = {
["th"] = "Module:th",
["km"] = "Module:km",
}
data.ignored_prefixes = {
["cat"] = true,
["category"] = true,
["file"] = true,
["image"] = true
}
-- Scheme for using unsupported characters in titles.
data.unsupported_characters = {
["#"] = "`num`",
["%"] = "`percnt`", -- only escaped in percent encoding
["&"] = "`amp`", -- only escaped in HTML entities
["."] = "`period`", -- only escaped in dot-slash notation
["<"] = "`lt`",
[">"] = "`gt`",
["["] = "`lsqb`",
["]"] = "`rsqb`",
["_"] = "`lowbar`",
["`"] = "`grave`", -- used to enclose unsupported characters in the scheme, so a raw use in an unsupported title must be escaped to prevent interference
["{"] = "`lcub`",
["|"] = "`vert`",
["}"] = "`rcub`",
["~"] = "`tilde`", -- only escaped when 3 or more are consecutive
["\239\191\189"] = "`repl`" -- replacement character U+FFFD, which can't be typed directly here due to an abuse filter
}
-- Manually specified unsupported titles. Only put titles here if there is a different reason why they are unsupported, and not just because they contain one of the unsupported characters above.
data.unsupported_titles = {
[" "] = "Space",
["&"] = "`amp`amp;",
["λοπαδοτεμαχοσελαχογαλεοκρανιολειψανοδριμυποτριμματοσιλφιοκαραβομελιτοκατακεχυμενοκιχλεπικοσσυφοφαττοπεριστεραλεκτρυονοπτοκεφαλλιοκιγκλοπελειολαγῳοσιραιοβαφητραγανοπτερύγων"] = "Ancient Greek dish",
["กรุงเทพมหานคร อมรรัตนโกสินทร์ มหินทรายุธยา มหาดิลกภพ นพรัตนราชธานีบูรีรมย์ อุดมราชนิเวศน์มหาสถาน อมรพิมานอวตารสถิต สักกะทัตติยวิษณุกรรมประสิทธิ์"] = "Thai name of Bangkok",
[u(0x1680)] = "Ogham space",
[u(0x3000)] = "Ideographic space"
}
-- Mammoth pages contain only Translingual and English entries, if present. The remaining L2s are placed on subpages.
-- The same subpage titles are used across all mammoth pages for the convenience of bot and script operators.
-- Assuming that most mammoth pages will be Latin-script terms, the subpage groupings are determined by dividing the
-- list of Latin-script languages known to Wiktionary into two (three, ...) roughly equal alphabetic divisions. This is
-- easily done by looking at Petscan's output:
-- https://petscan.wmcloud.org/?sortby=title&language=en&ns%5B14%5D=1&categories=Latin+script+languages&project=wiktionary&doit=
-- This data structure contains types of splits, each of which is a list of names of splits and Lua patterns applied to
-- the decomposed L2 name (with apostrophes and double quotes removed and certain other transformations applied; see
-- get_L2_sort_key() in [[Module:headword/page]]), or "true" for the final catch-all subpage (which includes anything
-- not beginning with a Latin letter after the transformations are applied; this includes e.g. ǃKung but not 'Are'are,
-- which sorts with A, and not Àhàn, which likewise sorts with A). The patterns must be suitable for use with plain
-- string functions, not their mw.ustring equivalents.
data.mammoth_page_subpage_types = {
twos = {
{"languages A to L", "^[A-L]"},
{"languages M to Z", true},
},
threes = {
{"languages A to I", "^[A-I]"},
{"languages J to Q", "^[J-Q]"},
{"languages R to Z", true},
},
CJK = {
{"languages A to C", "^[A-C]"}, -- Translingual and Chinese on one page
{"languages D to Z", true}, -- all the remainder (mostly Japanese, Korean, Vietnamese) on the other
},
}
-- "Mammoth pages" are pages whose entries cannot be housed on a single page because of MediaWiki limits. The key is
-- the page and the value is the subpage type, as defined above in `mammoth_page_subpage_types`.
data.mammoth_pages = {
["a"] = "twos", -- FIXME: change to threes
["mammoth page test"] = "twos", -- required for testing purposes - please leave here
}
return data
q02q3zllkyysivftvjq4h2gli665cgv
clarity
0
31610
507785
91903
2026-04-14T06:37:15Z
Redmin
6857
507785
wikitext
text/x-wiki
== {{ভাষা|en}} ==
=== উচ্চারণ ===
* {{উচ্চারণ-ভঙ্গি|UK}} {{আধ্বব|en|/ˈklæɹ.ɪ.ti/|/ˈklæɹ.ə.ti/}}
* {{উচ্চারণ-ভঙ্গি|US}} {{আধ্বব|en|/ˈklæɹ.ɪ.ti/|/ˈkleɹ.ə.ti/}}
* {{উচ্চারণ-ভঙ্গি|AusE}} {{আধ্বব|en|/ˈklæɹ.ə.ti/}}
* {{অন্ত্যমিল|en|æɹɪti}}
=== বিশেষ্য ===
{{en-বিশেষ্য|~}}
# [[নির্মলতা]]
dtq7z4t3dymdybbaf33oi6r4zcev3c3
মডিউল:আভিধানিক উপাত্ত
828
50158
507781
507770
2026-04-13T19:43:26Z
Redmin
6857
507781
Scribunto
text/plain
local p = {}
local i18n = require('মডিউল:আভিধানিক উপাত্ত/i18n')
local references = require('মডিউল:উইকিউপাত্ত তথ্যসূত্র বিন্যাসকরণ').format
local getArgs = require('Module:Arguments').getArgs
local formatter_urls = require('মডিউল:আভিধানিক উপাত্ত/urls').formatter_urls
local wb = mw.wikibase
local ustring = mw.ustring
local html = mw.html
local mw_lang = mw.language
local entity_cache = {}
local reference_cache = {}
local forms
-- Count the whitespace-separated words in a string using MediaWiki's
-- Unicode-aware string library.
-- @param text the string to count words in
-- @return the number of %S+ runs in `text`
-- NOTE: parameter renamed from `string`, which shadowed the standard
-- string library inside this function.
local function countWords(text)
    local count = 0
    for _ in ustring.gmatch(text, "%S+") do
        count = count + 1
    end
    return count
end
-- Normalize an Arabic-script lemma for matching: strip combining
-- diacritics (U+064B-U+065F) and the superscript alef, and collapse the
-- hamza/madda alef variants into the plain alef.
local function normalizeLemmas(text)
    local rules = {
        { "[ً-ٟ]", "" }, -- Arabic tashkil, U+064B-U+065F
        { "ٰ", "" },  -- superscript alef
        { "أ", "ا" }, -- alef with hamza above
        { "إ", "ا" }, -- alef with hamza below
        { "آ", "ا" }, -- alef with madda
    }
    for _, rule in ipairs(rules) do
        text = ustring.gsub(text, rule[1], rule[2])
    end
    return text
end
-- Serialize an arbitrary Lua value to a human-readable string
-- (adapted from https://stackoverflow.com/a/6081639).
-- @param val value to serialize (table/number/string/boolean; any other
--        type is rendered as a placeholder string)
-- @param name optional key, emitted as a "name = " prefix
-- @param skipnewlines when true, emit everything on one line
-- @param depth current indentation depth (internal; defaults to 0)
-- @return the serialized string
local function serializeTable(val, name, skipnewlines, depth)
    skipnewlines = skipnewlines or false
    depth = depth or 0
    local newline = not skipnewlines and "\n" or ""
    local out = string.rep(" ", depth)
    if name then
        out = out .. name .. " = "
    end
    local kind = type(val)
    if kind == "table" then
        local parts = { out, "{", newline }
        for key, item in pairs(val) do
            parts[#parts + 1] = serializeTable(item, key, skipnewlines, depth + 1)
            parts[#parts + 1] = ","
            parts[#parts + 1] = newline
        end
        parts[#parts + 1] = string.rep(" ", depth)
        parts[#parts + 1] = "}"
        out = table.concat(parts)
    elseif kind == "number" then
        out = out .. tostring(val)
    elseif kind == "string" then
        out = out .. string.format("%q", val)
    elseif kind == "boolean" then
        out = out .. (val and "true" or "false")
    else
        out = out .. "\"[inserializeable datatype:" .. kind .. "]\""
    end
    return out
end
-- Use this to safely expand templates when you are not sure that they exist.
-- @param frame the current frame object
-- @param title template title to expand
-- @param args table of template arguments
-- @return the expanded wikitext, or nil when the template is missing or
--         expansion failed
local function safeExpand(frame, title, args)
    local success, result = pcall(function()
        return frame:expandTemplate{ title = title, args = args }
    end)
    -- expandTemplate() doesn't seem to throw any error that can be handled
    -- with pcall(), so a string search on the output is the only viable check.
    -- Fix: `success` used to be ignored (so a raised error message could be
    -- returned as if it were output) and a nil result crashed :find().
    if not success or result == nil or result:find('does not exist') then
        return nil
    end
    return result
end
-- Format one reference for output, caching by hash so a repeated
-- reference is rendered only once.
-- @param id the reference hash used as the cache key
-- @param reference the reference object (with .snaks)
-- @return {out_id, text}: out_id is nil on first sight of the hash
--         (caller should emit the full text) and equal to `id` on a
--         cache hit (caller should emit a named re-use tag instead)
local function getReference( id, reference )
local out_id = nil
local url_value
if reference_cache[id] == nil then
local ref_text = references(reference, wb, mw_lang, i18n['content_lang_code'], i18n['wikipedia'])
if reference.snaks ~= nil then
-- P248 (stated in): special-case wording when the source is the Quran.
if reference.snaks['P248'] ~= nil then
for _, snak in pairs(reference.snaks['P248']) do
if snak.datavalue and snak.datavalue.value.id == 'Q428' then -- Q428: the Quran
ref_text = ustring.gsub(ref_text, 'নামক অনুচ্ছেদ', 'নং আয়াত')
break
end
end
end
-- P854 (reference URL): appended as an external link below.
if reference.snaks['P854'] ~= nil then
local snak = reference.snaks['P854'][1]
if snak.datavalue then
url_value = snak.datavalue.value
end
end
end
if url_value ~= nil then
ref_text = ref_text .. ', [' .. url_value .. ' সংযোগ]'
end
reference_cache[id] = ref_text
else
-- Cache hit: signal the caller via out_id that this hash was seen before.
out_id = id
end
return {out_id, reference_cache[id]}
end
-- Fetch a Wikibase entity by id, caching the result for this render.
-- Failed lookups are cached as `false` so repeated requests for a
-- missing entity do not call wb.getEntity again (previously a miss was
-- stored as nil, making the `~= false` guard below dead code).
-- @param id an entity id (e.g. "Q42", "L123")
-- @return the entity table, or nil when it does not exist
local function getEntity( id )
    if entity_cache[id] == nil then
        entity_cache[id] = wb.getEntity(id) or false
    end
    return entity_cache[id] ~= false and entity_cache[id] or nil
end
-- Determine a language code for the lexeme's language item, trying the
-- IETF language tag (P305), the Wikimedia language code (P424) and the
-- ISO 639-3 code (P220), in that order.
-- @param current_lexeme a lexeme entity
-- @return a language code string, or nil when none could be found
local function getLexemeLanguageCode(current_lexeme)
    local language_item = current_lexeme:getLanguage()
    if language_item == nil then
        return nil
    end
    local language_entity = getEntity(language_item)
    if language_entity == nil then
        return nil
    end
    local code_properties = {'P305', 'P424', 'P220'}
    for index = 1, #code_properties do
        local best = language_entity:getBestStatements(code_properties[index])
        if best[1] ~= nil then
            return best[1].mainsnak.datavalue.value
        end
    end
    return nil
end
-- Return the first form of the lexeme which has exactly the given grammatical feature.
-- Operates on the module-level `forms` list; returns nil when no form matches.
local function formWithSingleGrammaticalFeature( item_id )
    for _, form in ipairs(forms) do
        local features = form:getGrammaticalFeatures()
        if #features == 1 and features[1] == item_id then
            return form
        end
    end
    return nil
end
-- Build a Wikipedia-link template for the item `stmt_value`, or return
-- '' when the item is missing or has no sitelink on the configured wiki.
-- @param frame the current frame object
-- @param stmt_value an item id (e.g. "Q42")
-- @return the expanded template wikitext, or ''
local function getArticleLinkTemplate(frame, stmt_value)
    local template = ''
    local entity = getEntity(stmt_value)
    -- Guard: getEntity() returns nil for deleted/nonexistent items, which
    -- previously caused an indexing error on :getSitelink().
    local sitelink = entity ~= nil and entity:getSitelink(i18n['wikipedia']) or nil
    if sitelink ~= nil then
        template = frame:expandTemplate{
            title=i18n['template_wikipedia'],
            args={sitelink}
        }
    end
    return template
end
-- Collect Wikipedia-article link templates for a sense, drawn from both
-- P5137 (item for this sense) and P9970 (predicate for this sense).
local function getArticleLinks (frame, sense )
    local pieces = {}
    local link_properties = { 'P5137', 'P9970' }
    for _, property in ipairs(link_properties) do
        for _, stmt in pairs(sense:getAllStatements(property)) do
            pieces[#pieces + 1] = getArticleLinkTemplate(frame, stmt.mainsnak.datavalue.value.id)
        end
    end
    return table.concat(pieces)
end
-- Expand `template` with the lemmata of all lexemes referenced by
-- `property` on `object` (used for synonyms, antonyms, hypernyms,
-- demonyms). Returns '' when there are no statements.
-- NOTE(review): `lang_code` is not declared in this function nor as a
-- module-level local; presumably it is a global set elsewhere in this
-- module before this helper runs — confirm. If it is nil, the
-- `not lang_code` guard makes this always return ''.
-- @TODO: Generalise
local function expandTemplateForProperty(frame, object, property, template)
local lemmas = {}
local n = 0
for _, stmt in pairs(object:getAllStatements(property)) do
-- splitLexemeId() strips any sense/form suffix, leaving the bare L-id.
local lex = wb.lexeme.splitLexemeId(stmt.mainsnak.datavalue.value.id)
lex = getEntity(lex)
n = n + 1
lemmas[n] = lex:getLemma(lang_code)
end
if not lang_code or n == 0 then
return ''
end
-- Build args: first lang_code, then lemmas
local args = {lang_code}
for i = 1, n do
args[#args + 1] = lemmas[i]
end
return frame:expandTemplate{
title = template,
args = args
}
end
-- Build external-identifier links for an entity (T418639).
-- For every claim whose property has a formatter URL configured in
-- `formatter_urls`, produce a "[url label]" wikitext link, labelled with
-- the property's source website (P9073) when available, otherwise with
-- the property itself.
-- @param entity an entity table (as returned by wb.getEntity)
-- @return a list of wikitext external links (possibly empty)
local function getExternalLinks( entity ) -- T418639
    local links = {}
    local claims = entity.claims
    if claims == nil then
        return links
    end
    for property_id, statements in pairs(claims) do
        local url_pattern = formatter_urls[property_id]
        if url_pattern ~= nil then
            -- Prefer the label of the property's source website (P9073).
            local label
            local sources = wb.getBestStatements(property_id, 'P9073')
            if next(sources) ~= nil then
                local source_id = sources[1].mainsnak.datavalue.value.id
                label = wb.getLabel(source_id) or source_id
            else
                label = wb.getLabel(property_id) or property_id
            end
            for _, stmt in ipairs(statements) do
                local snak_value = stmt.mainsnak.datavalue
                if snak_value then
                    local url = ustring.gsub(url_pattern, '$1', snak_value.value)
                    url = ustring.gsub(url, ' ', '+')
                    table.insert(links, '[' .. url .. ' ' .. label .. ']')
                end
            end
        end
    end
    return links
end
-- Exported so the helper can be exercised from the debug console.
p.getExternalLinks = getExternalLinks
-- Wrap a term ({text, language_code}) in a <span> carrying the correct
-- lang and dir attributes for its language.
local function termSpan( term )
    local content, code = term[1], term[2]
    local direction = mw_lang.new( code ):getDir()
    local node = html.create( 'span' )
        :attr( 'lang', code )
        :attr( 'dir', direction )
        :wikitext( content )
    return tostring( node )
end
-- Like termSpan(), but the term text is wrapped in a wikilink before
-- being placed in the language-tagged <span>.
local function termLink( term )
    local content, code = term[1], term[2]
    local direction = mw_lang.new( code ):getDir()
    local node = html.create( 'span' )
        :attr( 'lang', code )
        :attr( 'dir', direction )
        :wikitext( '[[' .. content .. ']]' )
    return tostring( node )
end
-- Render all lemmata of a lexeme as language-tagged spans joined by '/'.
local function getLemmata( current_lexeme )
    local spans = {}
    for _, representation in pairs(current_lexeme:getLemmas()) do
        spans[#spans + 1] = termSpan(representation)
    end
    return table.concat(spans, '/')
end
-- Render all lemmata of a lexeme as language-tagged wikilinks joined by '/'.
local function getLinkedLemmata( current_lexeme )
    local links = {}
    for _, representation in pairs(current_lexeme:getLemmas()) do
        links[#links + 1] = termLink(representation)
    end
    return table.concat(links, '/')
end
-- Render the usage examples (P5831) attached to a sense, both from the
-- lexeme's own statements (qualified with P6072 = demonstrates sense)
-- and from statements stored directly on the sense entity. The form the
-- example demonstrates is bolded inside the example text when it can be
-- located.
-- Fix: `example_text`, `example_lang`, `example_form`, `example_str` and
-- `new_example_text` were accidental globals; they are now locals.
-- @param current_lexeme the lexeme entity
-- @param sense_id id of the sense whose examples are wanted
-- @param references_seen accumulator list of reference hashes (mutated)
-- @return {rendered <dl> wikitext string, references_seen}
local function getExamples( current_lexeme, sense_id, references_seen )
    local examples = html.create('dl')
    -- Pass 1: examples stated on the lexeme and qualified with this sense.
    for i, stmt in pairs(current_lexeme:getAllStatements('P5831')) do -- usage example
        if stmt.qualifiers ~= nil and stmt.qualifiers['P6072'] ~= nil and stmt.qualifiers['P6072'][1].datavalue.value.id == sense_id then -- demonstrates sense
            local example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ','<br/>')
            local example_lang = stmt.mainsnak.datavalue.value.language
            -- Candidate strings of the demonstrated form, to be bolded.
            local example_form_strs = {}
            if stmt.qualifiers['P1810'] ~= nil then
                table.insert(example_form_strs, stmt.qualifiers['P1810'][1].datavalue.value)
            elseif stmt.qualifiers['P5830'] ~= nil then
                local example_form = getEntity(stmt.qualifiers['P5830'][1].datavalue.value.id) -- demonstrates form
                for i, rep in pairs(example_form:getRepresentations()) do
                    table.insert(example_form_strs, rep[1])
                end
            end
            local example_str = nil
            for i, example_form_str in pairs(example_form_strs) do
                -- Bold the first candidate that actually occurs in the text.
                local new_example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
                if new_example_text ~= example_text then
                    example_str = termSpan({new_example_text, example_lang})
                    break
                end
                new_example_text = example_text
            end
            if example_str == nil then
                -- No candidate matched: show the example without bolding.
                example_str = termSpan({example_text, example_lang})
            end
            local reference_text = ''
            if stmt.references ~= nil then
                for j, reference in pairs(stmt.references) do
                    table.insert(references_seen, reference.hash)
                    local got_reference = getReference(reference.hash, reference)
                    reference_text = reference_text .. '\n\n' .. got_reference[2]
                end
            end
            if example_str ~= nil then
                examples:tag('dd'):wikitext("''" .. example_str .. "''")
                if reference_text ~= '' then
                    examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
                end
            end
        end
    end
    -- Pass 2: examples stored directly on the sense entity.
    for i, stmt in pairs(wb.getAllStatements(sense_id, 'P5831')) do -- usage example
        local example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ','<br/>')
        local example_lang = stmt.mainsnak.datavalue.value.language
        local example_form = getEntity(stmt.qualifiers['P5830'][1].datavalue.value.id) -- demonstrates form
        local example_form_str = nil
        if stmt.qualifiers['P1810'] ~= nil then
            example_form_str = stmt.qualifiers['P1810'][1].datavalue.value
        end
        if example_form_str == nil then
            example_form_str = example_form:getRepresentation(i18n['content_lang_code'])
        end
        if example_form_str == nil then
            example_form_str = example_form:getRepresentations()[1][1]
        end
        example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
        local example_str = termSpan({example_text, example_lang})
        local reference_text = ''
        if stmt.references ~= nil then
            for j, reference in pairs(stmt.references) do
                table.insert(references_seen, reference.hash)
                local got_reference = getReference(reference.hash, reference)
                reference_text = reference_text .. '\n\n' .. got_reference[2]
            end
        end
        if example_str ~= nil then
            examples:tag('dd'):wikitext("''" .. example_str .. "''")
            if reference_text ~= '' then
                examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
            end
        end
    end
    return { tostring(examples) , references_seen }
end
-- Return the match position(s) of the first character of `title` inside
-- the inclusive code-point range [start_point, end_point], or nil when
-- no character of the title lies in that range.
local function checkTitleCodePointInRange(title, start_point, end_point)
    local class = '[' .. ustring.char(start_point) .. '-' .. ustring.char(end_point) .. ']'
    return ustring.find( title, class )
end
-- For macrolanguages written in several scripts, refine the language
-- item id with a script-specific suffix based on which script the page
-- title uses, so the correct category is chosen later.
local function getLanguageForCategories( lang_id, current_page_title )
    -- language item -> ordered list of {first code point, last code point, refined id}
    local script_rules = {
        ['Q11051'] = { -- Hindi/Urdu
            { 0x0600, 0x06ff, 'Q11051ur' }, -- Arabic script: Urdu
            { 0x0900, 0x097f, 'Q11051hi' }, -- Devanagari: Hindi
        },
        ['Q58635'] = { -- Punjabi
            { 0x0600, 0x06ff, 'Q58635pnb' }, -- Shahmukhi
            { 0x0a00, 0x0a7f, 'Q58635pa' }, -- Gurmukhi
        },
        ['Q56356571'] = { -- New Persian
            { 0x0600, 0x06ff, 'Q56356571fa' }, -- Persian (Iran/Afghanistan)
            { 0x0400, 0x04ff, 'Q56356571tg' }, -- Cyrillic: Tajik
        },
    }
    local rules = script_rules[lang_id]
    if rules ~= nil then
        for _, rule in ipairs(rules) do
            if checkTitleCodePointInRange(current_page_title, rule[1], rule[2]) ~= nil then
                return rule[3]
            end
        end
    end
    return lang_id
end
-- Return the value of the first statement for `property` on `object`,
-- or nil when the property has no statements.
local function getOneStringForProperty(object, property)
    local statements = object:getAllStatements(property)
    if #statements == 0 then
        return nil
    end
    return statements[1].mainsnak.datavalue.value
end
-- Build a wikitext translations section from the senses' translation
-- statements (P5972). -- TODO: woefully incomplete until T185313 and
-- T199887 are resolved
-- Fix: `translation`, `language` and `gloss` were accidental globals;
-- they are now locals (`gloss` at function scope, because the trans-top
-- template below uses the gloss of the last contributing sense).
-- @param frame the current frame object
-- @param senses list of sense entities
-- @return wikitext string, or nil when there are no translations
local function getTranslations(frame, senses)
    if #senses == 0 then
        return nil
    end
    local translation_set = {}
    local gloss
    for _, sense in pairs(senses) do
        for _, stmt in pairs(sense:getAllStatements('P5972')) do
            local translation = stmt.mainsnak.datavalue.value.id
            local lexeme_id = wb.lexeme.splitLexemeId(translation)
            local language = wb.getLabel(getEntity(lexeme_id):getLanguage())
            gloss = sense:getGloss('bn')
            table.insert(translation_set, language .. ': ' .. getLinkedLemmata(getEntity(lexeme_id)) .. '<br/>')
        end
    end
    if #translation_set == 0 then
        return nil
    end
    local translations = frame:expandTemplate{title = i18n['template_trans-top'], args={gloss}}
    translations = '====' .. i18n['heading_translation'] .. '==== \n' .. translations .. table.concat(translation_set, '\n') .. frame:expandTemplate{title = i18n['template_trans-bottom']}
    return translations
end
-- Render the label of `cat_id` followed by the category link configured
-- for `lang_category` in i18n, falling back to that language's '_'
-- default entry, or to the maintenance category when the language has
-- no configuration at all.
local function getCategory( lang_category, cat_id )
    local per_language = i18n['lang_categories'][lang_category]
    local category_name
    if per_language == nil then
        category_name = i18n['maintenance_no_lang_category_found']
    else
        category_name = per_language[cat_id] or per_language['_']
    end
    return wb.getLabel( cat_id ) .. '[[Category:' .. category_name .. ']]'
end
-- Build a small pencil icon linking to the entity on Wikidata, used as
-- an inline "edit this on Wikidata" affordance.
-- @param langcode uselang parameter for the Wikidata link ("" to omit)
-- @param entityID the entity to link to (Q/L id)
-- @param propertyID optional fragment targeting a specific statement
local createicon = function(langcode, entityID, propertyID)
    langcode = langcode or ""
    propertyID = propertyID or ""
    local target = "https://www.wikidata.org/entity/" .. entityID
    if langcode ~= "" then
        target = target .. "?uselang=" .. langcode
    end
    if propertyID ~= "" then
        target = target .. "#" .. propertyID
    end
    -- " <span data-bridge-edit-flow='overwrite' class='penicon'>[[" -> enable Wikidata Bridge
    return " <span class='penicon autoconfirmed-show'>[["
        .. "File:OOjs UI icon edit-ltr-progressive.svg |frameless |text-top |10px |alt="
        .. i18n['edit_wikidata']
        .. "|link=" .. target
        .. "|" .. i18n['edit_wikidata'] .. "]]</span>"
end
-- Render the ordered list (<ol>) of meanings for the lexeme. For each
-- sense: an anchor, usage specifiers, the gloss (Bengali if available,
-- otherwise item labels or fallback-language glosses plus a maintenance
-- category), synonym/antonym/hypernym/demonym lines, quoted sources as
-- <ref> tags, an optional image, external-id links, article links,
-- caller-supplied notes from `args`, and the sense's usage examples.
-- NOTE(review): reads the global `lex_cat` below — presumably set
-- elsewhere in this module before this function runs; confirm.
-- @param frame the current frame object
-- @param args template arguments (per-sense notes keyed by sense id)
-- @param current_lexeme the lexeme entity
-- @param senses list of sense entities
-- @param references_seen accumulator of reference hashes (mutated)
-- @param language_name language name used in category links
-- @return {mw.html <ol> (or a request-for-definition string), references_seen}
local function getMeanings( frame, args, current_lexeme, senses, references_seen, language_name)
-- No senses at all: emit an edit icon plus a "definition requested" notice.
if #senses == 0 then
return {createicon(i18n['content_lang_code'], current_lexeme:getId()) .. "''" .. i18n['text_category_rfdef'] .. "''" .. '[[Category:' .. i18n['category_rfdef'] .. ']]', references_seen}
end
local meanings = html.create( 'ol' )
for i, sense in pairs(senses) do
local gloss_text_parts = {}
-- Anchor so other entries can link directly to this sense.
local main_gloss_text = frame:expandTemplate{
title=i18n['template_anchor'],
args={sense:getId()}
}
local specifiers = {}
for k, property_id in ipairs({'P6084', 'P6191', 'P9488'}) do -- location of sense usage, language style, field of usage
for i, stmt in pairs(sense:getAllStatements(property_id)) do
-- NOTE(review): `stmt_value` is an accidental global here.
stmt_value = stmt.mainsnak.datavalue.value.id
table.insert(specifiers, wb.getLabel(stmt_value, i18n['content_lang_code']))
end
end
if #specifiers > 0 then
main_gloss_text = main_gloss_text .. "(''" .. table.concat(specifiers, "'', ''") .. "'') "
end
local gloss = sense:getGloss( i18n['content_lang_code'] )
if gloss ~= nil then
-- Single-word glosses become wikilinks to that word's own entry.
if countWords(gloss) == 1 then
main_gloss_text = main_gloss_text .. "[[" .. gloss .. "#" .. i18n['content_lang_name'] .. "|" .. gloss .. "]]"
else
main_gloss_text = main_gloss_text .. gloss
end
if gloss:match('^প্রদত্ত%s*(%S-)%s*নাম$') then -- given names
main_gloss_text = main_gloss_text .. '[[' .. 'Category:' .. language_name .. ' ' .. i18n['category_given_names'] .. ']]'
end
else
-- No content-language gloss: fall back to labels of the P5137 items,
-- then to glosses in fallback languages, then to any gloss at all.
local other_gloss_text = nil
local other_gloss_lang = nil
local item_label_gloss_parts = {}
for k, stmt in pairs(sense:getAllStatements('P5137')) do -- use the label of the 'item for this sense' value, if any
local stmt_value = stmt.mainsnak.datavalue.value.id
local stmt_label = wb.getLabel(stmt_value)
if stmt_label ~= nil then
table.insert(item_label_gloss_parts, '[[:d:' .. stmt_value .. '|' .. stmt_label .. ']]')
end
end
if #item_label_gloss_parts > 0 then
other_gloss_text = table.concat(item_label_gloss_parts, '; ')
end
if other_gloss_text == nil then
for i, fallback_lang in ipairs(mw_lang.getFallbacksFor( i18n['content_lang_code'] )) do
if sense:getGloss( fallback_lang ) ~= nil then
other_gloss_text, other_gloss_lang = sense:getGloss( fallback_lang )
end
end
if other_gloss_lang == nil then
-- Last resort: take the first available gloss in any language.
local glosses = sense:getGlosses()
for j, gloss in pairs(glosses) do
other_gloss_text = gloss[1]
other_gloss_lang = gloss[2]
break
end
end
main_gloss_text = main_gloss_text .. other_gloss_text .. "<sup><em>" .. mw_lang.fetchLanguageName(other_gloss_lang, i18n['content_lang_code']) .. "</em></sup>"
else
main_gloss_text = main_gloss_text .. "''" .. other_gloss_text .. "''"
end
-- Track entries whose content-language definition is still missing.
main_gloss_text = main_gloss_text .. '[[Category:' .. i18n['category_rfdef_equivalent'] .. ']]'
end
local synonym = expandTemplateForProperty(frame, sense, 'P5973', i18n['template_synonym'])
if synonym ~= '' then
main_gloss_text = main_gloss_text .. ' <br/> ' .. synonym
end
local antonym = expandTemplateForProperty(frame, sense, 'P5974', i18n['template_antonym'])
if antonym ~= '' then
main_gloss_text = main_gloss_text .. ' <br/> ' .. antonym
end
local hypernym = expandTemplateForProperty(frame, sense, 'P6593', i18n['template_hypernym'])
if hypernym ~= '' then
main_gloss_text = main_gloss_text .. ' <br/> ' .. hypernym
end
if lex_cat == 'Q1084' or lex_cat == 'Q147276' then -- noun or proper noun
local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-noun'])
main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
elseif lex_cat == 'Q34698' then
local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-adj'])
main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
end
table.insert(gloss_text_parts, main_gloss_text .. createicon(i18n['content_lang_code'], sense:getId()))
for i, stmt in pairs(sense:getAllStatements('P8394')) do -- quotation of the gloss
-- NOTE(review): `gloss_quote` is an accidental global here.
gloss_quote = termSpan({stmt.mainsnak.datavalue.value.text, stmt.mainsnak.datavalue.value.language})
if stmt.references[1] ~= nil then
local got_reference = getReference ( stmt.references[1].hash, stmt.references[1] )
gloss_quote = '"' .. gloss_quote .. '" ' .. got_reference[2]
end
-- NOTE(review): if stmt.references is nil or empty, indexing
-- stmt.references[1].hash here raises; this insert looks like it
-- belongs inside the `if` above — confirm and fix.
table.insert(references_seen, stmt.references[1].hash)
table.insert(gloss_text_parts, frame:extensionTag('ref', gloss_quote))
end
for i, stmt in pairs(sense:getAllStatements('P1343')) do -- described by source
-- TODO: do away with making fake reference objects
local fake_reference = { ['snaks'] = {} }
fake_reference.snaks['P248'] = { [1] = stmt.mainsnak }
-- NOTE(review): `qualifiers_order` is an accidental global here.
qualifiers_order = stmt['qualifiers-order']
if qualifiers_order ~= nil then
for i, k in ipairs(qualifiers_order) do fake_reference.snaks[k] = stmt.qualifiers[k] end
end
-- Hash of the fake reference, used for de-duplication via getReference().
fake_reference.hash = mw.hash.hashValue('sha3-512', serializeTable(fake_reference))
table.insert(references_seen, fake_reference.hash)
local got_reference = getReference(fake_reference.hash, fake_reference)
if got_reference[1] == nil then
table.insert(gloss_text_parts, frame:extensionTag('ref', got_reference[2], {name = fake_reference.hash}))
else
-- Reference already rendered once: emit a named re-use tag instead.
table.insert(gloss_text_parts, frame:extensionTag{name = 'ref', content='', args = {name = got_reference[1]}})
end
end
local first_sense_image = ''
local sense_images = sense:getAllStatements('P18')
if next(sense_images) ~= nil then
first_sense_image = sense_images[1].mainsnak.datavalue.value
end
if first_sense_image ~= '' then
table.insert(gloss_text_parts, '[[চিত্র:' .. first_sense_image .. "|thumb|'''" .. getLemmata(current_lexeme) .. "'''—" .. main_gloss_text .. ']]')
end
local idlinks = getExternalLinks(sense)
if #idlinks > 0 then
local idlinktext = '<small>('
for i, idlink in pairs(idlinks) do
idlinktext = idlinktext .. idlink .. '\n'
end
idlinktext = idlinktext .. ')</small>'
table.insert(gloss_text_parts, idlinktext)
end
local externallinks = getArticleLinks(frame, sense)
if externallinks ~= '' then
table.insert(gloss_text_parts, externallinks)
end
-- Caller-supplied wikitext notes, keyed by full sense id or by the
-- id's suffix after the first '-'.
local new_notes = {}
local sense_keys = { sense:getId(), string.sub(sense:getId(), string.find(sense:getId(), '-')+1) }
for i, v in ipairs(sense_keys) do
if args[v] ~= nil then
table.insert(new_notes, args[v])
end
end
if #new_notes > 0 then
for i, v in ipairs(new_notes) do
if i == 1 then
table.insert(gloss_text_parts, '<br/>' .. v)
else
table.insert(gloss_text_parts, v)
end
end
end
-- The local deliberately shadows the `references_seen` parameter with
-- the (same) accumulator returned by getExamples().
local examples, references_seen = unpack(getExamples( current_lexeme, sense:getId(), references_seen ))
local gloss_text = table.concat(gloss_text_parts, '\n')
meanings:tag('li'):wikitext(gloss_text):wikitext(examples)
end
return {meanings, references_seen}
end
-- Pick the form whose statements should represent the lexeme's pronunciation.
-- For Bengali nouns and verbs a specific grammatical form is preferred;
-- otherwise fall back to an arbitrary form from the module-global `forms`.
function getPronunciationBaseForm( lang_name, lex_cat)
	-- (!) Preferred base-form features for other languages can be added here.
	local preferred_feature_by_category = {
		['Q1084'] = 'Q131105', -- noun -> nominative case
		['Q24905'] = 'Q1350145', -- verb -> verbal noun
	}
	local base_form
	if lang_name == 'বাংলা' then
		local feature = preferred_feature_by_category[lex_cat]
		if feature ~= nil then
			base_form = formWithSingleGrammaticalFeature(feature)
		end
	end
	if base_form == nil then
		-- Fall back to whichever form pairs() happens to yield first.
		for _, form in pairs(forms) do
			base_form = form
			break
		end
	end
	return base_form
end
-- Build an etymology string for a lexeme assembled from other lexemes
-- ("combines lexemes", P5238), e.g. "part1 + part2".
-- Parts are ordered by their "series ordinal" (P1545) qualifier; statements
-- without an ordinal are ignored, as before.
-- Returns '' when the lexeme has no usable P5238 statements.
local function getCombines( current_lexeme )
	local index_mappings = {}
	local ordinals = {}
	for _, stmt in pairs(current_lexeme:getAllStatements('P5238')) do -- combines lexemes
		if stmt.qualifiers ~= nil and stmt.qualifiers['P1545'] ~= nil then -- series ordinal
			local current_index = tonumber(stmt.qualifiers['P1545'][1].datavalue.value)
			if current_index ~= nil then
				if index_mappings[current_index] == nil then
					table.insert(ordinals, current_index)
				end
				index_mappings[current_index] = stmt
			end
		end
	end
	-- Sort the ordinals explicitly so that non-contiguous or non-1-based
	-- numbering still renders every part in order (the previous ipairs()
	-- walk stopped at the first gap and missed parts entirely when the
	-- numbering did not start at 1).
	table.sort(ordinals)
	local combines = ''
	for _, idx in ipairs(ordinals) do
		local stmt = index_mappings[idx]
		local part_lexeme = getEntity(stmt.mainsnak.datavalue.value.id)
		local current_substring = getLinkedLemmata(part_lexeme)
		local part_etymology = getEtymology(part_lexeme)
		if part_etymology ~= '' and part_etymology ~= nil then
			current_substring = current_substring .. ' (← ' .. part_etymology .. ')'
		end
		if combines == '' then
			combines = current_substring
		else -- @TODO: This should use the 'affix' template instead.
			combines = combines .. ' + ' .. current_substring
		end
	end
	return combines
end
-- Render the lexeme's root (P5920), if any, as "√<linked lemma>".
-- Only the first root statement is used; returns '' when none exists.
function getRoots ( current_lexeme )
	local root_statements = current_lexeme:getAllStatements('P5920') -- root
	local first = root_statements[1]
	if first == nil then
		return ''
	end
	return '√' .. getLinkedLemmata(getEntity(first.mainsnak.datavalue.value.id))
end
-- Recursively build an etymology chain for a lexeme from its "derived from"
-- statements (P5191), its root (P5920) and its combined parts (P5238).
-- Returns '' when nothing is known.
function getEtymology ( current_lexeme )
	-- TODO: see if any etymology chains are not possible to render
	local etymology = ''
	local current_combines = getCombines(current_lexeme)
	local current_roots = getRoots(current_lexeme)
	local stmts = current_lexeme:getAllStatements('P5191') -- derived from lexeme
	if #stmts == 0 then
		if current_roots ~= '' and current_combines ~= '' then
			return current_roots .. '<br/>(' .. current_combines .. ')'
		elseif current_roots ~= '' then
			return current_roots
		else
			return current_combines
		end
	end
	for _, stmt in pairs(stmts) do
		local origin_lexeme_dv = stmt.mainsnak.datavalue -- If this is nil, the origin lexeme is not known.
		if origin_lexeme_dv ~= nil then
			local origin_lexeme = getEntity(origin_lexeme_dv.value.id)
			local origin_lexeme_lang = origin_lexeme:getLanguage()
			local origin_lexeme_string = getLinkedLemmata(origin_lexeme) .. ' (' .. wb.getLabel(origin_lexeme_lang) .. ')'
			if stmt.qualifiers ~= nil and stmt.qualifiers['P5886'] ~= nil then -- mode of derivation
				local mode_of_derivation = stmt.qualifiers['P5886'][1].datavalue.value.id
				if mode_of_derivation == 'Q1345001' then -- borrowing
					origin_lexeme_string = ustring.gsub(i18n['etymology_borrowing'], '$1', origin_lexeme_string)
				elseif mode_of_derivation == 'Q845079' then -- learned borrowing
					origin_lexeme_string = ustring.gsub(i18n['etymology_learned_borrowing'], '$1', origin_lexeme_string)
				elseif mode_of_derivation == 'Q56611986' then -- inheritance
					origin_lexeme_string = ustring.gsub(i18n['etymology_inheritance'], '$1', origin_lexeme_string)
				end
			end
			-- Recurse into the origin lexeme's own etymology.
			local origin_origin = getEtymology(origin_lexeme)
			local new_etymology_string
			if origin_origin ~= '' and origin_origin ~= nil then
				new_etymology_string = origin_lexeme_string .. ' ← ' .. origin_origin
			else
				new_etymology_string = origin_lexeme_string
			end
			-- BUGFIX: the two strings above were previously declared local
			-- inside this block but appended to `etymology` after it closed,
			-- so those reads resolved to nil globals and either dropped the
			-- chain or raised a concatenation error.
			if etymology == '' then
				etymology = new_etymology_string
			else
				etymology = etymology .. ' ' .. origin_lexeme_string
			end
		end
	end
	if current_roots ~= '' then
		etymology = etymology .. ' ' .. current_roots
	end
	if current_combines ~= '' then
		etymology = etymology .. '<br/>(' .. current_combines .. ')'
	end
	return etymology
end
-- Build the pronunciation bullet list for a lexeme: audio files (P443) and
-- IPA transcriptions (P898) from a representative base form, per-language
-- IPA templates as a fallback, plus ISO 15919 (P5825), ITRANS (P8881) and
-- IAST (P7581) transliterations. Returns '' when nothing is available.
function getPronunciation ( frame, current_lexeme, lang_name, lex_cat )
	local pronunciations = {}
	local base_form = getPronunciationBaseForm(lang_name, lex_cat )
	if base_form ~= nil then
		for _, stmt in pairs(base_form:getAllStatements('P443')) do -- pronunciation audio
			local pronunciation_file = stmt.mainsnak.datavalue.value
			local specifier_text = ''
			local specifiers = {}
			if stmt.qualifiers ~= nil then
				for _, property_id in ipairs({'P5237'}) do -- pronunciation variety
					-- BUGFIX: the qualifier may be absent for this statement;
					-- pairs(nil) used to raise an error here.
					for _, qual in pairs(stmt.qualifiers[property_id] or {}) do
						local stmt_value = qual.datavalue.value.id -- BUGFIX: was an accidental global
						table.insert(specifiers, wb.getLabel(stmt_value))
					end
				end
			end
			if #specifiers > 0 then
				specifier_text = table.concat(specifiers, "'', ''")
			end
			local audio_text
			if specifier_text ~= '' then
				audio_text = i18n['text_audio'] .. ' (' .. specifier_text .. ')'
			else
				audio_text = i18n['text_audio']
			end
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title = i18n['template_audio'],
				args = {lang_name, pronunciation_file, audio_text}
			})
		end
		local ipa_transcription = base_form:getAllStatements('P898') -- IPA transcription
		local iso15919_transcription = getOneStringForProperty(base_form, 'P5825') -- ISO 15919 transliteration
		local itrans = getOneStringForProperty(base_form, 'P8881') -- ITRANS
		local iast = getOneStringForProperty(base_form, 'P7581') -- IAST transliteration
		-- @TODO: when both audio and a transcription exist, show them right next to each other
		if #ipa_transcription ~= 0 then
			for _, stmt in pairs(ipa_transcription) do
				local ipa_text = stmt.mainsnak.datavalue.value
				local specifier_text = ''
				local specifiers = {}
				if stmt.qualifiers ~= nil then
					for _, property_id in ipairs({'P5237'}) do -- pronunciation variety
						-- Guarded like the audio loop above.
						for _, qual in pairs(stmt.qualifiers[property_id] or {}) do
							table.insert(specifiers, wb.getLabel( qual.datavalue.value.id ))
						end
					end
				end
				if #specifiers > 0 then
					specifier_text = "(''" .. table.concat(specifiers, "'', ''") .. "'') "
				end
				table.insert(pronunciations, '* ' .. specifier_text .. frame:expandTemplate{
					title = i18n['template_ipa'],
					args = {lang_name, ipa_text}
				})
			end
		-- Fallback: language-specific IPA templates when no explicit IPA exists.
		-- The following checks are ordered based on which one is expected to be true in a higher number of cases.
		elseif lang_name == 'বাংলা' then
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title = 'bn-IPA',
			})
		elseif lang_name == 'আরবি' then
			local lemma = current_lexeme:getLemma('ar')
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title = 'ar-IPA',
				args = {lemma}
			})
		elseif lang_name == 'ফালা' then
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title = 'fax-pron',
			})
		elseif lang_name == 'ফিনীয়' then
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title = 'fi-IPA',
			})
		end
		if iso15919_transcription ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_iso15919'] .. ': ' .. iso15919_transcription)
		end
		if itrans ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_itrans'] .. ': ' .. itrans)
		end
		if iast ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_iast'] .. ': ' .. iast)
		end
	end -- {{আধ্বব|en|/ˈɪntəvjuː/}}
	return table.concat(pronunciations, '\n')
end
-- Collect the lexeme's alternative spellings (P11577) as a newline-separated
-- list of '* '-prefixed linked lemmata. Statements without a value are skipped.
function getAlternativeSpellings( current_lexeme )
	local lines = {}
	for _, stmt in pairs(current_lexeme:getAllStatements('P11577')) do -- alternative spelling
		local dv = stmt.mainsnak.datavalue
		if dv ~= nil then
			lines[#lines + 1] = '* ' .. getLinkedLemmata(getEntity(dv.value.id))
		end
	end
	return table.concat(lines, '\n')
end
-- Wrap `text` in a MediaWiki heading of the given level,
-- e.g. heading_level("x", 2) -> "== x ==".
local function heading_level(text, level)
	local marks = ('='):rep(level)
	return ('%s %s %s'):format(marks, text, marks)
end
-- Collect args[k] for every key k in `keys` that is present in `args`,
-- preserving the order of `keys`. (`sections` is accepted for signature
-- compatibility with add_any_notes but is not used.)
function get_any_notes(sections, args, keys)
	local notes = {}
	for _, key in ipairs(keys) do
		local note = args[key]
		if note ~= nil then
			notes[#notes + 1] = note
		end
	end
	return notes
end
-- Append every entry of `notes` to `sections`, preserving order.
function add_specific_notes(sections, notes)
	for _, note in ipairs(notes) do
		sections[#sections + 1] = note
	end
end
-- For each key in `keys` (in order), append args[key] to `sections`
-- when that argument was supplied.
function add_any_notes(sections, args, keys)
	for _, key in ipairs(keys) do
		local note = args[key]
		if note ~= nil then
			sections[#sections + 1] = note
		end
	end
end
-- This calls frame:preprocess() instead of :callParserFunction() because the
-- latter does not work for Wikifunctions function calls yet (see
-- https://www.wikifunctions.org/wiki/Wikifunctions:Embedded_function_calls).
local function callWikifunctionsFunction(args, frame)
	return frame:preprocess(('{{#function:%s}}'):format(args))
end
-- Render a generic, language-independent inflection table for the given
-- forms: one row per form showing its representation(s) and the labels of
-- its grammatical features, plus an image column (P7407) when any form has
-- an image. The table is sortable and collapsible; it starts collapsed
-- unless at least one image is present.
local function buildLanguageAgnosticInflectionTable(forms)
local has_image = false
local form_images = {}
-- First pass: remember each form's image (first P7407 value), if any.
for i, form in ipairs(forms) do
local form_image = form:getAllStatements('P7407')
if next(form_image) ~= nil then
form_images[i] = form_image[1].mainsnak.datavalue.value
has_image = true
end
end
local table_class = "wikitable mw-collapsible sortable"
if not has_image then
-- No images: collapse the table by default.
table_class = table_class .. " mw-collapsed"
end
local text = "{| class='" .. table_class .. "' style='border:solid 1px rgb(80%,80%,100%); text-align:center;'\n"
text = text .. "|+ " .. i18n['heading_inflection_table'] .. "\n"
text = text .. "|- \n"
text = text .. "! " .. i18n['heading_form'] .. " !! " .. i18n['heading_grammatical_features']
if has_image then
text = text .. " !! " .. (i18n['heading_image'])
end
text = text .. " \n"
-- Second pass: emit one table row per form.
for i, form in ipairs(forms) do
local rep = form:getRepresentations()
local feat = form:getGrammaticalFeatures()
-- Join all representations with " / ".
local rep_text = ""
for j, r in pairs(rep) do
if rep_text == "" then
rep_text = r[1]
else
rep_text = rep_text .. " / " .. r[1]
end
end
-- Join the labels of all grammatical features with ", ".
local feat_text = ""
if feat ~= nil then
for j, f in ipairs(feat) do
local label = wb.getLabel(f)
if feat_text == "" then
feat_text = label
else
feat_text = feat_text .. ", " .. label
end
end
end
text = text .. "|-\n"
-- Empty cells are rendered as an em dash.
text = text .. "| " .. (rep_text ~= "" and rep_text or "—")
text = text .. " || " .. (feat_text ~= "" and feat_text or "—")
if has_image then
local image_cell = "—"
if form_images[i] ~= nil then
image_cell = "[[চিত্র:" .. form_images[i] .. "|50px]]"
end
text = text .. " || " .. image_cell
end
text = text .. "\n"
end
text = text .. "|}"
return text
end
-- Main entry point: renders a complete dictionary entry for the lexeme id
-- given in args[1] (a Wikidata L-id). args[2] set to "false"/"0"/"না"
-- suppresses the language heading. Additional named args (keyed by sense id
-- or by the keys listed in i18n's manual_* tables) inject manual note text.
-- Returns the assembled wikitext for the whole entry.
function p.all( frame )
local args = getArgs(frame)
local lexeme_id = args[1]
local current_lexeme = getEntity(lexeme_id)
local current_language = current_lexeme:getLanguage()
local senses = current_lexeme:getSenses()
local add_heading = true
-- Intentionally module-global: shared with getPronunciationBaseForm etc.
forms = current_lexeme:getForms()
if args[2] ~= nil then
local val = mw.text.trim(tostring(args[2]))
if val == "false" or val == "0" or val == "না" then
add_heading = false
end
end
local references_seen = {}
local sections = {}
local lang_name = wb.getLabel(current_language)
if add_heading == true then
local lang_heading = "== " .. lang_name .. " =="
table.insert(sections, lang_heading)
end
local lex_cat = current_lexeme:getLexicalCategory()
lang_code = getLexemeLanguageCode(current_lexeme) -- This should remain available to all functions.
local title = mw.title.getCurrentTitle().text
local lang_category = getLanguageForCategories(current_language, title)
local cat = getCategory( lang_category, lex_cat )
local lex_cat_template
if cat ~= nil then
-- Part-of-speech heading with an anchor, then the lexeme template.
table.insert(sections, '===' .. cat .. frame:expandTemplate{
title = i18n['template_anchor'],
args = { lexeme_id }
} .. '===')
table.insert(sections, frame:expandTemplate{
title= i18n['template_lexeme'],
args = {lexeme_id}
})
add_any_notes(sections, args, i18n['manual_category'])
-- Etymology section.
local etymology = getEtymology ( current_lexeme )
if etymology ~= '' and etymology ~= nil then
table.insert(sections, heading_level(i18n['heading_etymology'], 4))
table.insert(sections, tostring(etymology))
end
add_any_notes(sections, args, i18n['manual_etymology'])
-- Pronunciation section.
local pronunciation = getPronunciation( frame, current_lexeme, lang_name, lex_cat )
if pronunciation ~= '' then
table.insert(sections, heading_level(i18n['heading_pronunciation'], 4))
table.insert(sections, tostring(pronunciation))
end
add_any_notes(sections, args, i18n['manual_pronunciation'])
-- Alternative spellings section.
local alternative_spellings = getAlternativeSpellings( current_lexeme )
if alternative_spellings ~= '' then
table.insert(sections, heading_level(i18n['heading_alternative_spellings'], 4))
table.insert(sections, alternative_spellings)
end
-- Headword-line template, chosen by language code and lexical category.
if lang_code ~= nil then
if lex_cat == 'Q34698' then -- adjective
lex_cat_template = safeExpand(frame, lang_code .. '-adj')
if not lex_cat_template then
lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষণ')
end
elseif lex_cat == 'Q1084' then
local gender
local stmts = current_lexeme:getAllStatements('P5185') -- grammatical gender
if #stmts ~= 0 then
local gender_qid = stmts[1].mainsnak.datavalue.value.id
if gender_qid == 'Q499327' then -- @TODO: Add checks for every possible circumstance
gender = 'm'
elseif gender_qid == 'Q1775415' then
gender = 'f'
end
end
-- The following checks are ordered based on which one is expected to be true in a higher number of cases.
if current_language == 'Q13955' then
-- Arabic: pass the lemma whose normalized form matches the page title.
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
lex_cat_template = frame:expandTemplate{title='ar-noun', args={matched_lemma,gender}}
else
lex_cat_template = frame:expandTemplate{title='ar-noun', args={nil,gender}}
end
elseif current_language == 'Q29919' then
lex_cat_template = frame:expandTemplate{title='arz-noun', args={g=gender}}
elseif current_language == 'Q397' then
-- Latin: only emit the template when a lemma matches the title.
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
lex_cat_template = frame:expandTemplate{title='la-noun', args={matched_lemma,g=gender}}
end
elseif current_language == 'Q11059' then
lex_cat_template = frame:expandTemplate{title='sa-noun', args={g=gender}}
elseif current_language ~= 'Q1860' then -- These templates require the gender to be passed as the 1st argument.
lex_cat_template = safeExpand(frame, lang_code .. '-noun', {gender})
if not lex_cat_template then
lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষ্য', {gender})
end
end
end
end
-- elseif lex_cat == 'Q147276' then
-- lex_cat_template = safeExpand(frame, lang_code .. '-proper noun', {gender})
-- if not lex_cat_template then
-- lex_cat_template = safeExpand(frame, lang_code .. '-নামবাচক বিশেষ্য', {gender})
-- end
end
if lex_cat_template ~= nil then
table.insert(sections, lex_cat_template)
end
-- Senses / meanings (shadows the outer references_seen on purpose:
-- getMeanings returns the updated list).
local meanings, references_seen = unpack(getMeanings( frame, args, current_lexeme, senses, references_seen, lang_name))
if lex_cat_template == nil then
-- No headword template available: show the matching lemma as a heading.
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
table.insert(sections, heading_level(matched_lemma, 4))
else
table.insert(sections, '[[Category:যেসব ভুক্তিতে লেমার হেডিং দেখানো অসম্ভব]]')
end
end
table.insert(sections, tostring(meanings))
add_any_notes(sections, args, i18n['manual_meaning'])
local instance_of = current_lexeme:getBestStatements('P31') -- instance of
if #instance_of ~= 0 then
local instance_of_entity = instance_of[1].mainsnak.datavalue.value.id
if instance_of_entity == 'Q40437546' or instance_of_entity == 'Q120831827' or instance_of_entity == 'Q120717979' then -- @TODO: generalise this so all types of roots are shown
local instance_of_value = '#' .. i18n['text_instance_of'] .. ' ' .. wb.getLabel(instance_of_entity)
table.insert(sections, instance_of_value)
end
end
-- Translations.
local translations = getTranslations(frame, senses)
if translations ~= nil then
table.insert(sections, translations)
end
-- (!) Inflection tables for particular languages can be added here with new if statements.
if next(forms) ~= nil then
if current_language == 'Q9610' then -- Bengali
local conjTable = require('মডিউল:আভিধানিক উপাত্ত/Q9610').getInflectionTable(frame, current_lexeme)
table.insert(sections, conjTable) -- Bengali adjectives do not need any table thanks to the bn-adj template.
--elseif current_language == 'Q13955' then -- Arabic
-- if lex_cat == 'Q1084' then
-- table.insert(sections, frame:expandTemplate{title='ar-decl-noun', args={lemma}})
-- end
--elseif current_language == 'Q188' then -- German
-- if lex_cat == 'Q1084' then
-- table.insert(sections, callWikifunctionsFunction('Z28602|' .. lexeme_id .. '|', frame)) -- German noun declension table, enable once T422299 is resolved
-- end
else
if current_language ~= 'Q1860' then -- English
table.insert(sections, buildLanguageAgnosticInflectionTable(forms))
end
end
end
-- References section (only when something was cited or manually supplied).
local reference_notes = get_any_notes(sections, args, i18n['manual_reference'])
if #references_seen > 0 or #reference_notes > 0 then
table.insert(sections, heading_level(i18n['heading_references'], 4))
table.insert(sections, frame:extensionTag('references'))
add_specific_notes(sections, reference_notes)
end
-- External links section.
local external_link_table = getExternalLinks ( current_lexeme )
if #external_link_table > 0 then
local external_links = '* ' .. table.concat(external_link_table, '\n* ')
table.insert(sections, heading_level(i18n['heading_external_links'], 4))
table.insert(sections, external_links)
end
add_any_notes(sections, args, i18n['manual_external_link'])
-- Maintenance category when the entry cites nothing at all.
if #references_seen == 0 and #reference_notes == 0 and #external_link_table == 0 and #get_any_notes(sections, args, i18n['manual_external_link']) == 0 then
if i18n['category_rfref'][lang_category] ~= nil then
table.insert(sections, '[[Category:' .. i18n['category_rfref'][lang_category] .. ']]')
else
table.insert(sections, '[[Category:' .. i18n['category_rfref']['_'] .. ']]')
end
end
return table.concat(sections,"\n\n")
end
return p
hhtodtebctcfzladcukd1x68bvigmkc
507782
507781
2026-04-13T20:02:36Z
Redmin
6857
507782
Scribunto
text/plain
local p = {}
local i18n = require('মডিউল:আভিধানিক উপাত্ত/i18n')
local references = require('মডিউল:উইকিউপাত্ত তথ্যসূত্র বিন্যাসকরণ').format
local getArgs = require('Module:Arguments').getArgs
local formatter_urls = require('মডিউল:আভিধানিক উপাত্ত/urls').formatter_urls
local wb = mw.wikibase
local ustring = mw.ustring
local html = mw.html
local mw_lang = mw.language
local entity_cache = {}
local reference_cache = {}
local forms
local lang_code
-- Count the whitespace-separated words (runs of %S+) in `text`,
-- Unicode-aware via mw.ustring.
-- The parameter was renamed from `string`, which shadowed the standard
-- string library inside this function.
local function countWords(text)
	local count = 0
	for word in ustring.gmatch(text, "%S+") do
		count = count + 1
	end
	return count
end
-- Normalize an Arabic-script lemma so it can be compared with the page
-- title: strips combining diacritics and folds alef variants into bare alef.
local function normalizeLemmas(text)
	-- Remove the combining harakat/tanwin marks (character range ً–ٟ,
	-- i.e. U+064B–U+065F — TODO confirm the intended range).
	text = mw.ustring.gsub(text, "[ً-ٟ]", "")
	-- Remove superscript alef (U+0670).
	text = mw.ustring.gsub(text, "ٰ", "")
	-- Fold alef-with-hamza-above, alef-with-hamza-below and alef-madda
	-- into plain alef.
	text = mw.ustring.gsub(text, "أ", "ا")
	text = mw.ustring.gsub(text, "إ", "ا")
	text = mw.ustring.gsub(text, "آ", "ا")
	return text
end
-- Recursively serialize a Lua value into a readable string.
-- Adapted from https://stackoverflow.com/a/6081639
-- @param val value to serialize (tables are walked with pairs())
-- @param name optional key rendered as "name = " before the value
-- @param skipnewlines when true, omit newlines between table entries
-- @param depth current indentation level (one space per level)
local function serializeTable(val, name, skipnewlines, depth)
	depth = depth or 0
	local nl = skipnewlines and "" or "\n"
	local pieces = { string.rep(" ", depth) }
	if name then
		pieces[#pieces + 1] = name .. " = "
	end
	local kind = type(val)
	if kind == "table" then
		pieces[#pieces + 1] = "{" .. nl
		for k, v in pairs(val) do
			pieces[#pieces + 1] = serializeTable(v, k, skipnewlines, depth + 1) .. "," .. nl
		end
		pieces[#pieces + 1] = string.rep(" ", depth) .. "}"
	elseif kind == "number" then
		pieces[#pieces + 1] = tostring(val)
	elseif kind == "string" then
		pieces[#pieces + 1] = string.format("%q", val)
	elseif kind == "boolean" then
		pieces[#pieces + 1] = val and "true" or "false"
	else
		pieces[#pieces + 1] = "\"[inserializeable datatype:" .. kind .. "]\""
	end
	return table.concat(pieces)
end
-- Use this to safely expand templates when you are not sure that they exist.
-- Returns the expansion, or nil when the template is missing or the
-- expansion failed.
local function safeExpand(frame, title, args)
	local success, result = pcall(function()
		return frame:expandTemplate{ title = title, args = args }
	end)
	-- BUGFIX: the pcall status used to be ignored, so when expandTemplate did
	-- raise, the error message itself could be returned as if it were
	-- wikitext. Treat any raised error (or non-string result) as "no template".
	if not success or type(result) ~= "string" then
		return nil
	end
	-- expandTemplate() doesn't reliably raise a catchable error for missing
	-- templates, so a string search is the only viable detection.
	if result:find('does not exist') then
		return nil
	end
	return result
end
-- Format one reference and deduplicate it by `id` (the reference's snak
-- hash). On first sight the rendered text is cached and {nil, text} is
-- returned: the caller should emit a full <ref name=id>text</ref>. On later
-- calls {id, cached_text} is returned: the caller should emit a reusing
-- <ref name=id/> instead.
local function getReference( id, reference )
local out_id = nil
local url_value
if reference_cache[id] == nil then
local ref_text = references(reference, wb, mw_lang, i18n['content_lang_code'], i18n['wikipedia'])
if reference.snaks ~= nil then
-- "stated in" (P248): adjust the wording when the source is the Quran
-- (section -> verse).
if reference.snaks['P248'] ~= nil then
for _, snak in pairs(reference.snaks['P248']) do
if snak.datavalue and snak.datavalue.value.id == 'Q428' then -- the Quran
ref_text = ustring.gsub(ref_text, 'নামক অনুচ্ছেদ', 'নং আয়াত')
break
end
end
end
-- "reference URL" (P854): remembered and appended as a link below.
if reference.snaks['P854'] ~= nil then
local snak = reference.snaks['P854'][1]
if snak.datavalue then
url_value = snak.datavalue.value
end
end
end
if url_value ~= nil then
ref_text = ref_text .. ', [' .. url_value .. ' সংযোগ]'
end
reference_cache[id] = ref_text
else
-- Already rendered once: signal the caller to reuse it by name.
out_id = id
end
return {out_id, reference_cache[id]}
end
-- Memoized wrapper around mw.wikibase.getEntity.
-- Returns the entity table, or nil when the id cannot be resolved.
-- Misses are cached as `false` so a missing id is only looked up once;
-- the final expression maps that sentinel back to nil for callers.
local function getEntity( id )
	if entity_cache[id] == nil then
		-- BUGFIX: failed lookups were previously left as nil in the cache,
		-- so the (expensive) wb.getEntity call was repeated on every access
		-- even though the `~= false` check below clearly anticipated a
		-- false sentinel.
		entity_cache[id] = wb.getEntity(id) or false
	end
	return entity_cache[id] ~= false and entity_cache[id] or nil
end
-- Resolve a usable language code for the lexeme's language item, trying in
-- order: IETF language tag (P305), Wikimedia language code (P424), then
-- ISO 639-3 (P220). Returns nil when the language item or all codes are
-- unavailable.
local function getLexemeLanguageCode(current_lexeme)
	local lang_item_id = current_lexeme:getLanguage()
	if lang_item_id == nil then
		return nil
	end
	local lang_entity = getEntity(lang_item_id)
	if lang_entity == nil then
		return nil
	end
	local code_properties = {'P305', 'P424', 'P220'}
	for _, prop in ipairs(code_properties) do
		local best = lang_entity:getBestStatements(prop)[1]
		if best ~= nil then
			return best.mainsnak.datavalue.value
		end
	end
	return nil
end
-- Return the first form of the lexeme (from the module-global `forms` list)
-- whose grammatical features are exactly { item_id }; nil when none matches.
local function formWithSingleGrammaticalFeature( item_id )
	for _, form in ipairs(forms) do
		local features = form:getGrammaticalFeatures()
		if #features == 1 and features[1] == item_id then
			return form
		end
	end
	return nil
end
-- Expand the Wikipedia-link template for the item's sitelink on the
-- configured Wikipedia; returns '' when the item has no such sitelink.
local function getArticleLinkTemplate(frame, stmt_value)
	local sitelink = getEntity(stmt_value):getSitelink(i18n['wikipedia'])
	if sitelink == nil then
		return ''
	end
	return frame:expandTemplate{
		title = i18n['template_wikipedia'],
		args = { sitelink }
	}
end
-- Concatenate Wikipedia-link templates for a sense's "item for this sense"
-- (P5137) and "predicate for this sense" (P9970) statements, in that order.
local function getArticleLinks (frame, sense )
	local parts = {}
	for _, prop in ipairs({'P5137', 'P9970'}) do
		for _, stmt in pairs(sense:getAllStatements(prop)) do
			parts[#parts + 1] = getArticleLinkTemplate(frame, stmt.mainsnak.datavalue.value.id)
		end
	end
	return table.concat(parts)
end
-- @TODO: Generalise
-- Expand `template` with the content language code followed by the lemmata
-- of every lexeme referenced by `property` on `object`.
-- Returns '' when no language code is known or the property has no statements.
local function expandTemplateForProperty(frame, object, property, template)
	if not lang_code then
		-- Hoisted: without a language code the result was always '', yet the
		-- statements were still fetched and their entities resolved needlessly.
		return ''
	end
	local args = { lang_code }
	local n = 0
	for _, stmt in pairs(object:getAllStatements(property)) do
		n = n + 1
		local lexeme = getEntity(wb.lexeme.splitLexemeId(stmt.mainsnak.datavalue.value.id))
		args[#args + 1] = lexeme:getLemma(lang_code)
	end
	if n == 0 then
		return ''
	end
	return frame:expandTemplate{
		title = template,
		args = args
	}
end
-- Build "[url label]" external links for every statement on `entity` whose
-- property has a known formatter URL (see the urls submodule). -- T418639
local function getExternalLinks( entity )
	local external_links = {}
	if entity.claims == nil then
		return external_links
	end
	for property_id, statements in pairs(entity.claims) do
		local formatter_url = formatter_urls[property_id]
		if formatter_url ~= nil then
			-- Prefer the label of the property's source (P9073) as the link
			-- label, falling back to the property label, then the raw id.
			local source_name
			local property_source = wb.getBestStatements(property_id, 'P9073')
			if next(property_source) ~= nil then
				local source_id = property_source[1].mainsnak.datavalue.value.id
				source_name = wb.getLabel(source_id) or source_id
			else
				source_name = wb.getLabel(property_id) or property_id
			end
			for _, stmt in ipairs(statements) do
				if stmt.mainsnak.datavalue then
					local url = ustring.gsub(formatter_url, '$1', stmt.mainsnak.datavalue.value)
					url = ustring.gsub(url, ' ', '+')
					external_links[#external_links + 1] = '[' .. url .. ' ' .. source_name .. ']'
				end
			end
		end
	end
	return external_links
end
p.getExternalLinks = getExternalLinks -- exported so it stays reachable for debugging
-- Wrap a term ({text, language_code}) in a <span> carrying the language
-- code and its writing directionality; returns the rendered HTML string.
local function termSpan( term )
	local text, lang = term[1], term[2]
	local span = html.create( 'span' )
		:attr( 'lang', lang )
		:attr( 'dir', mw_lang.new( lang ):getDir() )
		:wikitext( text )
	return tostring( span )
end
-- Like termSpan, but the text is rendered as a wikilink ([[text]]) inside
-- the language/directionality <span>.
local function termLink( term )
	local text, lang = term[1], term[2]
	local span = html.create( 'span' )
		:attr( 'lang', lang )
		:attr( 'dir', mw_lang.new( lang ):getDir() )
		:wikitext( '[[' .. text .. ']]' )
	return tostring( span )
end
-- Join all lemma representations of the lexeme with '/' as plain
-- language-tagged spans (no links).
local function getLemmata( current_lexeme )
	local parts = {}
	for _, rep in pairs(current_lexeme:getLemmas()) do
		parts[#parts + 1] = termSpan(rep)
	end
	return table.concat(parts, '/')
end
-- Join all lemma representations of the lexeme with '/', each rendered as a
-- wikilinked, language-tagged span.
local function getLinkedLemmata( current_lexeme )
	local parts = {}
	for _, rep in pairs(current_lexeme:getLemmas()) do
		parts[#parts + 1] = termLink(rep)
	end
	return table.concat(parts, '/')
end
-- Gather usage examples (P5831) for the given sense: first from the
-- lexeme's statements (matched via the "demonstrates sense" P6072
-- qualifier), then from statements stored directly on the sense. The
-- matched form string is bolded inside the example text when found.
-- Reference hashes are appended to `references_seen`.
-- Returns { rendered <dl> wikitext, references_seen }.
local function getExamples( current_lexeme, sense_id, references_seen )
	local examples = html.create('dl')
	for _, stmt in pairs(current_lexeme:getAllStatements('P5831')) do -- usage example
		if stmt.qualifiers ~= nil and stmt.qualifiers['P6072'] ~= nil and stmt.qualifiers['P6072'][1].datavalue.value.id == sense_id then -- demonstrates sense
			-- BUGFIX: all of the following were accidental globals before.
			local example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ', '<br/>')
			local example_lang = stmt.mainsnak.datavalue.value.language
			local example_form_strs = {}
			if stmt.qualifiers['P1810'] ~= nil then -- subject named as
				table.insert(example_form_strs, stmt.qualifiers['P1810'][1].datavalue.value)
			elseif stmt.qualifiers['P5830'] ~= nil then -- demonstrates form
				local example_form = getEntity(stmt.qualifiers['P5830'][1].datavalue.value.id)
				for _, rep in pairs(example_form:getRepresentations()) do
					table.insert(example_form_strs, rep[1])
				end
			end
			local example_str = nil
			for _, example_form_str in pairs(example_form_strs) do
				-- NOTE(review): the form string is used as a Lua pattern here;
				-- magic characters in a form could mis-match — confirm.
				local new_example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
				if new_example_text ~= example_text then
					example_str = termSpan({new_example_text, example_lang})
					break
				end
			end
			if example_str == nil then
				-- No form matched: show the example without bolding.
				example_str = termSpan({example_text, example_lang})
			end
			local reference_text = ''
			if stmt.references ~= nil then
				for _, reference in pairs(stmt.references) do
					table.insert(references_seen, reference.hash)
					local got_reference = getReference(reference.hash, reference)
					reference_text = reference_text .. '\n\n' .. got_reference[2]
				end
			end
			examples:tag('dd'):wikitext("''" .. example_str .. "''")
			if reference_text ~= '' then
				examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
			end
		end
	end
	for _, stmt in pairs(wb.getAllStatements(sense_id, 'P5831')) do -- usage example
		local example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ', '<br/>')
		local example_lang = stmt.mainsnak.datavalue.value.language
		-- BUGFIX: qualifiers were dereferenced without nil checks here,
		-- crashing on statements lacking P1810/P5830.
		local quals = stmt.qualifiers or {}
		local example_form_str = nil
		if quals['P1810'] ~= nil then -- subject named as
			example_form_str = quals['P1810'][1].datavalue.value
		end
		if example_form_str == nil and quals['P5830'] ~= nil then -- demonstrates form
			local example_form = getEntity(quals['P5830'][1].datavalue.value.id)
			example_form_str = example_form:getRepresentation(i18n['content_lang_code'])
			if example_form_str == nil then
				example_form_str = example_form:getRepresentations()[1][1]
			end
		end
		if example_form_str ~= nil then
			example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
		end
		local example_str = termSpan({example_text, example_lang})
		local reference_text = ''
		if stmt.references ~= nil then
			for _, reference in pairs(stmt.references) do
				table.insert(references_seen, reference.hash)
				local got_reference = getReference(reference.hash, reference)
				reference_text = reference_text .. '\n\n' .. got_reference[2]
			end
		end
		examples:tag('dd'):wikitext("''" .. example_str .. "''")
		if reference_text ~= '' then
			examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
		end
	end
	return { tostring(examples) , references_seen }
end
-- Return mw.ustring.find's results for the first character of `title` that
-- falls inside the inclusive codepoint range [start_point, end_point];
-- nil when no such character occurs.
local function checkTitleCodePointInRange(title, start_point, end_point)
	local class = '[' .. ustring.char(start_point) .. '-' .. ustring.char(end_point) .. ']'
	return ustring.find( title, class )
end
-- Refine a language item id for categorisation when one Wikidata item
-- covers several written varieties: the script of the current page title
-- (checked against codepoint ranges, first match wins) selects a
-- variety-specific pseudo-id. Unknown languages pass through unchanged.
local function getLanguageForCategories( lang_id, current_page_title )
	local script_overrides = {
		['Q11051'] = { -- Hindi/Urdu
			{ 0x0600, 0x06ff, 'Q11051ur' }, -- Urdu (Arabic script)
			{ 0x0900, 0x097f, 'Q11051hi' }, -- Hindi (Devanagari)
		},
		['Q58635'] = { -- Punjabi
			{ 0x0600, 0x06ff, 'Q58635pnb' }, -- Shahmukhi
			{ 0x0a00, 0x0a7f, 'Q58635pa' }, -- Gurmukhi
		},
		['Q56356571'] = { -- New Persian
			{ 0x0600, 0x06ff, 'Q56356571fa' }, -- Persian (Iran/Afghanistan)
			{ 0x0400, 0x04ff, 'Q56356571tg' }, -- Tajik (Cyrillic)
		},
	}
	local candidates = script_overrides[lang_id]
	if candidates ~= nil then
		for _, entry in ipairs(candidates) do
			if checkTitleCodePointInRange(current_page_title, entry[1], entry[2]) ~= nil then
				return entry[3]
			end
		end
	end
	return lang_id
end
-- Return the mainsnak value of the first statement for `property` on
-- `object`, or nil when the object has no such statements.
local function getOneStringForProperty(object, property)
	local first = object:getAllStatements(property)[1]
	if first == nil then
		return nil
	end
	return first.mainsnak.datavalue.value
end
-- Build the translations section from each sense's "translation" (P5972)
-- statements: one "<language>: <linked lemma>" line per translation, wrapped
-- in trans-top/trans-bottom templates under a level-4 heading.
-- Returns nil when there are no senses or no translations at all.
-- TODO: woefully incomplete until T185313 and T199887 are resolved
local function getTranslations(frame, senses)
	if #senses == 0 then
		return nil
	end
	local translation_set = {}
	-- BUGFIX: `gloss` was an accidental global; it holds the last processed
	-- sense's Bengali gloss, used for the trans-top header below.
	local gloss
	for _, sense in pairs(senses) do
		for _, stmt in pairs(sense:getAllStatements('P5972')) do
			local translation = stmt.mainsnak.datavalue.value.id
			local lexeme_id = wb.lexeme.splitLexemeId(translation)
			local language = wb.getLabel(getEntity(lexeme_id):getLanguage())
			gloss = sense:getGloss('bn')
			table.insert(translation_set, language .. ': ' .. getLinkedLemmata(getEntity(lexeme_id)) .. '<br/>')
		end
	end
	if #translation_set == 0 then
		return nil
	end
	local translations = frame:expandTemplate{title = i18n['template_trans-top'], args = {gloss}}
	translations = '====' .. i18n['heading_translation'] .. '==== \n' .. translations .. table.concat(translation_set, '\n') .. frame:expandTemplate{title = i18n['template_trans-bottom']}
	return translations
end
-- Return the label of the lexical category `cat_id` followed by a category
-- link chosen from i18n's per-language table for `lang_category` (with a
-- '_' default), or a maintenance category when the language is unknown.
local function getCategory( lang_category, cat_id )
	local cat_text = wb.getLabel( cat_id )
	local lang_cat = i18n['lang_categories'][lang_category]
	local category
	if lang_cat ~= nil then
		category = lang_cat[cat_id] or lang_cat['_']
	else
		category = i18n['maintenance_no_lang_category_found']
	end
	return cat_text .. '[[Category:' .. category .. ']]'
end
-- Build the small pencil "edit on Wikidata" icon linking to the entity page
-- (optionally with ?uselang=<langcode> and a #<propertyID> anchor).
local createicon = function(langcode, entityID, propertyID)
	langcode = langcode or ""
	entityID = entityID or "" -- consistency: previously only the other two arguments were defaulted
	propertyID = propertyID or ""
	local icon = " <span class='penicon autoconfirmed-show'>[["
	-- " <span data-bridge-edit-flow='overwrite' class='penicon'>[[" -> enable Wikidata Bridge
		.. "File:OOjs UI icon edit-ltr-progressive.svg |frameless |text-top |10px |alt="
		.. i18n['edit_wikidata']
		.. "|link=https://www.wikidata.org/entity/" .. entityID
	if langcode ~= "" then icon = icon .. "?uselang=" .. langcode end
	if propertyID ~= "" then icon = icon .. "#" .. propertyID end
	icon = icon .. "|" .. i18n['edit_wikidata'] .. "]]</span>"
	return icon
end
-- Build the numbered <ol> of meanings (senses) for a lexeme: for each sense,
-- an anchor, usage specifiers, the gloss (linked when a single word, with
-- fallbacks when no content-language gloss exists), synonyms/antonyms/
-- hypernyms/demonyms, gloss quotations and "described by source" footnotes,
-- an illustrating image, external-ID links, Wikipedia links, manual notes
-- and usage examples.
-- Returns { ol html builder, references_seen }; references_seen accumulates
-- reference hashes so the caller knows whether to emit a references section.
local function getMeanings( frame, args, current_lexeme, senses, references_seen, language_name)
	if #senses == 0 then
		-- No senses yet: edit icon plus request-for-definition notice/category.
		return {createicon(i18n['content_lang_code'], current_lexeme:getId()) .. "''" .. i18n['text_category_rfdef'] .. "''" .. '[[Category:' .. i18n['category_rfdef'] .. ']]', references_seen}
	end
	local meanings = html.create( 'ol' )
	for i, sense in pairs(senses) do
		local gloss_text_parts = {}
		local main_gloss_text = frame:expandTemplate{
			title=i18n['template_anchor'],
			args={sense:getId()}
		}
		-- Usage specifiers: location of sense usage (P6084), language style
		-- (P6191), field of usage (P9488).
		local specifiers = {}
		for k, property_id in ipairs({'P6084', 'P6191', 'P9488'}) do
			for i, stmt in pairs(sense:getAllStatements(property_id)) do
				local stmt_value = stmt.mainsnak.datavalue.value.id
				table.insert(specifiers, wb.getLabel(stmt_value, i18n['content_lang_code']))
			end
		end
		if #specifiers > 0 then
			main_gloss_text = main_gloss_text .. "(''" .. table.concat(specifiers, "'', ''") .. "'') "
		end
		local gloss = sense:getGloss( i18n['content_lang_code'] )
		if gloss ~= nil then
			if countWords(gloss) == 1 then
				-- Single-word glosses become links to the matching entry.
				main_gloss_text = main_gloss_text .. "[[" .. gloss .. "#" .. i18n['content_lang_name'] .. "|" .. gloss .. "]]"
			else
				main_gloss_text = main_gloss_text .. gloss
			end
			if gloss:match('^প্রদত্ত%s*(%S-)%s*নাম$') then -- given names
				main_gloss_text = main_gloss_text .. '[[' .. 'Category:' .. language_name .. ' ' .. i18n['category_given_names'] .. ']]'
			end
		else
			-- No content-language gloss: fall back to "item for this sense"
			-- (P5137) labels, then to a gloss in a fallback language, then to
			-- any gloss at all.
			local other_gloss_text = nil
			local other_gloss_lang = nil
			local item_label_gloss_parts = {}
			for k, stmt in pairs(sense:getAllStatements('P5137')) do
				local stmt_value = stmt.mainsnak.datavalue.value.id
				local stmt_label = wb.getLabel(stmt_value)
				if stmt_label ~= nil then
					table.insert(item_label_gloss_parts, '[[:d:' .. stmt_value .. '|' .. stmt_label .. ']]')
				end
			end
			if #item_label_gloss_parts > 0 then
				other_gloss_text = table.concat(item_label_gloss_parts, '; ')
			end
			if other_gloss_text == nil then
				for i, fallback_lang in ipairs(mw_lang.getFallbacksFor( i18n['content_lang_code'] )) do
					if sense:getGloss( fallback_lang ) ~= nil then
						other_gloss_text, other_gloss_lang = sense:getGloss( fallback_lang )
					end
				end
				if other_gloss_lang == nil then
					local glosses = sense:getGlosses()
					for j, gloss in pairs(glosses) do
						other_gloss_text = gloss[1]
						other_gloss_lang = gloss[2]
						break
					end
				end
				-- FIX: a sense with no glosses in any language previously
				-- raised a nil-concatenation error here.
				if other_gloss_text ~= nil then
					main_gloss_text = main_gloss_text .. other_gloss_text .. "<sup><em>" .. mw_lang.fetchLanguageName(other_gloss_lang, i18n['content_lang_code']) .. "</em></sup>"
				end
			else
				main_gloss_text = main_gloss_text .. "''" .. other_gloss_text .. "''"
			end
			main_gloss_text = main_gloss_text .. '[[Category:' .. i18n['category_rfdef_equivalent'] .. ']]'
		end
		local synonym = expandTemplateForProperty(frame, sense, 'P5973', i18n['template_synonym'])
		if synonym ~= '' then
			main_gloss_text = main_gloss_text .. ' <br/> ' .. synonym
		end
		local antonym = expandTemplateForProperty(frame, sense, 'P5974', i18n['template_antonym'])
		if antonym ~= '' then
			main_gloss_text = main_gloss_text .. ' <br/> ' .. antonym
		end
		local hypernym = expandTemplateForProperty(frame, sense, 'P6593', i18n['template_hypernym'])
		if hypernym ~= '' then
			main_gloss_text = main_gloss_text .. ' <br/> ' .. hypernym
		end
		-- NOTE(review): lex_cat is not a parameter here and resolves to a
		-- global; unless it is set elsewhere these demonym branches never
		-- run -- confirm the intended scope of lex_cat.
		if lex_cat == 'Q1084' or lex_cat == 'Q147276' then -- noun or proper noun
			local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-noun'])
			main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
		elseif lex_cat == 'Q34698' then -- adjective
			local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-adj'])
			main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
		end
		table.insert(gloss_text_parts, main_gloss_text .. createicon(i18n['content_lang_code'], sense:getId()))
		-- Gloss quotations (P8394), each emitted as a footnote.
		for i, stmt in pairs(sense:getAllStatements('P8394')) do
			local gloss_quote = termSpan({stmt.mainsnak.datavalue.value.text, stmt.mainsnak.datavalue.value.language})
			-- FIX: the reference hash used to be read unconditionally after
			-- this block, crashing on statements without references; it is
			-- now only read and recorded when a reference exists.
			if stmt.references ~= nil and stmt.references[1] ~= nil then
				local got_reference = getReference ( stmt.references[1].hash, stmt.references[1] )
				gloss_quote = '"' .. gloss_quote .. '" ' .. got_reference[2]
				table.insert(references_seen, stmt.references[1].hash)
			end
			table.insert(gloss_text_parts, frame:extensionTag('ref', gloss_quote))
		end
		-- "Described by source" (P1343): synthesise a reference object so the
		-- ordinary reference formatter can be reused.
		for i, stmt in pairs(sense:getAllStatements('P1343')) do
			-- TODO: do away with making fake reference objects
			local fake_reference = { ['snaks'] = {} }
			fake_reference.snaks['P248'] = { [1] = stmt.mainsnak }
			local qualifiers_order = stmt['qualifiers-order'] -- FIX: was an accidental global
			if qualifiers_order ~= nil then
				for i, k in ipairs(qualifiers_order) do fake_reference.snaks[k] = stmt.qualifiers[k] end
			end
			fake_reference.hash = mw.hash.hashValue('sha3-512', serializeTable(fake_reference))
			table.insert(references_seen, fake_reference.hash)
			local got_reference = getReference(fake_reference.hash, fake_reference)
			if got_reference[1] == nil then
				table.insert(gloss_text_parts, frame:extensionTag('ref', got_reference[2], {name = fake_reference.hash}))
			else
				table.insert(gloss_text_parts, frame:extensionTag{name = 'ref', content='', args = {name = got_reference[1]}})
			end
		end
		-- First image (P18) illustrating this sense, captioned with the
		-- lemma(ta) and the gloss text.
		local first_sense_image = ''
		local sense_images = sense:getAllStatements('P18')
		if next(sense_images) ~= nil then
			first_sense_image = sense_images[1].mainsnak.datavalue.value
		end
		if first_sense_image ~= '' then
			table.insert(gloss_text_parts, '[[চিত্র:' .. first_sense_image .. "|thumb|'''" .. getLemmata(current_lexeme) .. "'''—" .. main_gloss_text .. ']]')
		end
		local idlinks = getExternalLinks(sense)
		if #idlinks > 0 then
			local idlinktext = '<small>('
			for i, idlink in pairs(idlinks) do
				idlinktext = idlinktext .. idlink .. '\n'
			end
			idlinktext = idlinktext .. ')</small>'
			table.insert(gloss_text_parts, idlinktext)
		end
		local externallinks = getArticleLinks(frame, sense)
		if externallinks ~= '' then
			table.insert(gloss_text_parts, externallinks)
		end
		-- Manually supplied per-sense notes, keyed by the full sense id
		-- (L123-S1) or its short form (S1).
		local new_notes = {}
		local sense_keys = { sense:getId(), string.sub(sense:getId(), string.find(sense:getId(), '-')+1) }
		for i, v in ipairs(sense_keys) do
			if args[v] ~= nil then
				table.insert(new_notes, args[v])
			end
		end
		for i, v in ipairs(new_notes) do
			if i == 1 then
				table.insert(gloss_text_parts, '<br/>' .. v)
			else
				table.insert(gloss_text_parts, v)
			end
		end
		local examples, references_seen = unpack(getExamples( current_lexeme, sense:getId(), references_seen ))
		local gloss_text = table.concat(gloss_text_parts, '\n')
		meanings:tag('li'):wikitext(gloss_text):wikitext(examples)
	end
	return {meanings, references_seen}
end
-- Choose the form whose pronunciation data should represent the lexeme.
-- For Bengali, nouns use their nominative form and verbs their verbal noun;
-- otherwise (or when no such form exists) the first available form is used.
local function getPronunciationBaseForm( lang_name, lex_cat)
	-- (!) Languages whose words have a different base form can be handled
	-- here with a new if-statement.
	if lang_name == 'বাংলা' then
		local wanted_feature
		if lex_cat == 'Q1084' then -- noun
			wanted_feature = 'Q131105' -- nominative case
		elseif lex_cat == 'Q24905' then -- verb
			wanted_feature = 'Q1350145' -- verbal noun
		end
		if wanted_feature ~= nil then
			local candidate = formWithSingleGrammaticalFeature( wanted_feature )
			if candidate ~= nil then
				return candidate
			end
		end
	end
	-- Fall back to the first form, if any.
	for i, form in pairs(forms) do
		return form
	end
	return nil
end
-- Render the "combines lexemes" (P5238) parts of a lexeme as
-- "part1 (← etym1) + part2 ...", ordering parts by their P1545
-- ("series ordinal") qualifier.  Statements without an ordinal are skipped,
-- as before; duplicate ordinals keep the last statement seen.
local function getCombines( current_lexeme )
	local by_index = {}
	for i, stmt in pairs(current_lexeme:getAllStatements('P5238')) do
		if stmt.qualifiers ~= nil and stmt.qualifiers['P1545'] ~= nil then -- series ordinal
			local current_index = tonumber(stmt.qualifiers['P1545'][1].datavalue.value)
			-- FIX: a non-numeric ordinal previously produced a nil table
			-- index error, and gaps in the ordinals made ipairs() stop early
			-- and silently drop later parts.  Collect then sort instead.
			if current_index ~= nil then
				by_index[current_index] = stmt
			end
		end
	end
	local indices = {}
	for idx in pairs(by_index) do
		indices[#indices + 1] = idx
	end
	table.sort(indices)
	local combines = ''
	for _, idx in ipairs(indices) do
		local stmt = by_index[idx]
		local part_lexeme_id = stmt.mainsnak.datavalue.value.id
		local part_lexeme = getEntity(part_lexeme_id)
		local current_substring = getLinkedLemmata(part_lexeme)
		local part_etymology = getEtymology(part_lexeme)
		if part_etymology ~= '' and part_etymology ~= nil then
			current_substring = current_substring .. ' (← ' .. part_etymology .. ')'
		end
		if combines == '' then
			combines = current_substring
		else -- @TODO: This should use the 'affix' template instead.
			combines = combines .. ' + ' .. current_substring
		end
	end
	return combines
end
-- Return the root (P5920) of a lexeme rendered as "√lemma", or '' when the
-- lexeme has no root statement.
function getRoots ( current_lexeme )
	local root_statements = current_lexeme:getAllStatements('P5920')
	local first = root_statements[1]
	if first == nil then
		return ''
	end
	local root_lexeme = getEntity(first.mainsnak.datavalue.value.id)
	return '√' .. getLinkedLemmata(root_lexeme)
end
-- Render the etymology of a lexeme: its derivation chain (P5191, followed
-- recursively), its root (P5920) and its combining parts (P5238).
-- Returns '' when nothing is known.
function getEtymology ( current_lexeme )
	-- TODO: see if any etymology chains are not possible to render
	local etymology = ''
	local current_combines = getCombines(current_lexeme)
	local current_roots = getRoots(current_lexeme)
	local stmts = current_lexeme:getAllStatements('P5191') -- derived from lexeme
	if #stmts == 0 then
		if current_roots ~= '' and current_combines ~= '' then
			return current_roots .. '<br/>(' .. current_combines .. ')'
		elseif current_roots ~= '' then
			return current_roots
		else
			return current_combines
		end
	end
	for i, stmt in pairs(stmts) do
		local origin_lexeme_dv = stmt.mainsnak.datavalue -- nil when the origin lexeme is not known
		if origin_lexeme_dv ~= nil then
			local origin_lexeme = getEntity(origin_lexeme_dv.value.id)
			local origin_lexeme_lang = origin_lexeme:getLanguage()
			local origin_lexeme_string = getLinkedLemmata(origin_lexeme) .. ' (' .. wb.getLabel(origin_lexeme_lang) .. ')'
			-- Wrap the origin in borrowing/inheritance wording when a mode of
			-- derivation (P5886) qualifier is present.
			if stmt.qualifiers ~= nil and stmt.qualifiers['P5886'] ~= nil then
				local mode_of_derivation = stmt.qualifiers['P5886'][1].datavalue.value.id
				if mode_of_derivation == 'Q1345001' then -- borrowing
					origin_lexeme_string = ustring.gsub(i18n['etymology_borrowing'], '$1', origin_lexeme_string)
				elseif mode_of_derivation == 'Q845079' then -- learned borrowing
					origin_lexeme_string = ustring.gsub(i18n['etymology_learned_borrowing'], '$1', origin_lexeme_string)
				elseif mode_of_derivation == 'Q56611986' then -- inheritance
					origin_lexeme_string = ustring.gsub(i18n['etymology_inheritance'], '$1', origin_lexeme_string)
				end
			end
			-- Recursively render the origin's own etymology chain.
			local origin_origin = getEtymology(origin_lexeme)
			local new_etymology_string
			if origin_origin ~= '' and origin_origin ~= nil then
				new_etymology_string = origin_lexeme_string .. ' ← ' .. origin_origin
			else
				new_etymology_string = origin_lexeme_string
			end
			-- FIX: this accumulation previously sat outside the datavalue
			-- check, where new_etymology_string/origin_lexeme_string were out
			-- of scope (always nil); etymology then became nil and the later
			-- concatenations raised errors.
			if etymology == '' then
				etymology = new_etymology_string
			else
				etymology = etymology .. ' ' .. origin_lexeme_string
			end
		end
	end
	if current_roots ~= '' then
		etymology = etymology .. ' ' .. current_roots
	end
	if current_combines ~= '' then
		etymology = etymology .. '<br/>(' .. current_combines .. ')'
	end
	return etymology
end
-- Build the pronunciation section for a lexeme: audio files (P443) and IPA
-- (P898) from a representative base form, falling back to per-language IPA
-- templates when no IPA statement exists, plus ISO 15919 (P5825),
-- ITRANS (P8881) and IAST (P7581) transliterations.
-- Returns '' when no base form is available.
local function getPronunciation ( frame, current_lexeme, lang_name, lex_cat )
	local pronunciations = {}
	local base_form = getPronunciationBaseForm(lang_name, lex_cat )
	if base_form ~= nil then
		for i, stmt in pairs(base_form:getAllStatements('P443')) do -- pronunciation audio
			local pronunciation_file = stmt.mainsnak.datavalue.value
			local specifier_text = ''
			local specifiers = {}
			if stmt.qualifiers ~= nil then
				for k, property_id in ipairs({'P5237'}) do -- pronunciation variety
					-- FIX: this qualifier may be absent even when other
					-- qualifiers exist; iterating nil used to raise an error.
					for l, qual in pairs(stmt.qualifiers[property_id] or {}) do
						local stmt_value = qual.datavalue.value.id
						table.insert(specifiers, wb.getLabel(stmt_value))
					end
				end
			end
			if #specifiers > 0 then
				specifier_text = table.concat(specifiers, "'', ''")
			end
			local audio_text
			if specifier_text ~= '' then
				audio_text = i18n['text_audio'] .. ' (' .. specifier_text .. ')'
			else
				audio_text = i18n['text_audio']
			end
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title= i18n['template_audio'],
				args = {lang_name, pronunciation_file, audio_text}
			})
		end
		local ipa_transcription = base_form:getAllStatements('P898') -- IPA transcription
		local iso15919_transcription = getOneStringForProperty(base_form, 'P5825') -- ISO 15919 transliteration
		local itrans = getOneStringForProperty(base_form, 'P8881') -- ITRANS
		local iast = getOneStringForProperty(base_form, 'P7581') -- IAST transliteration
		-- @TODO: when both audio and a transcription exist, they should be
		-- shown right next to each other.
		if #ipa_transcription ~= 0 then
			for i, stmt in pairs(ipa_transcription) do
				local ipa_text = stmt.mainsnak.datavalue.value
				local specifier_text = ''
				local specifiers = {}
				if stmt.qualifiers ~= nil then
					for k, property_id in ipairs({'P5237'}) do -- pronunciation variety
						-- FIX: same nil-qualifier guard as for the audio above.
						for l, qual in ipairs(stmt.qualifiers[property_id] or {}) do
							table.insert(specifiers, wb.getLabel( qual.datavalue.value.id ))
						end
					end
				end
				if #specifiers > 0 then
					specifier_text = "(''" .. table.concat(specifiers, "'', ''") .. "'') "
				end
				table.insert(pronunciations, '* ' .. specifier_text .. frame:expandTemplate{
					title= i18n['template_ipa'],
					args = {lang_name, ipa_text}
				})
			end
		-- The following checks are ordered based on which one is expected to be true in a higher number of cases.
		elseif lang_name == 'বাংলা' then -- Bengali
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='bn-IPA',
			})
		elseif lang_name == 'আরবি' then -- Arabic
			local lemma = current_lexeme:getLemma('ar')
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='ar-IPA',
				args={lemma}
			})
		elseif lang_name == 'ফালা' then -- Fala
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='fax-pron',
			})
		elseif lang_name == 'ফিনীয়' then -- Finnish
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='fi-IPA',
			})
		end
		if iso15919_transcription ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_iso15919'] .. ': ' .. iso15919_transcription)
		end
		if itrans ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_itrans'] .. ': ' .. itrans)
		end
		if iast ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_iast'] .. ': ' .. iast)
		end
	end -- {{আধ্বব|en|/ˈɪntəvjuː/}}
	return table.concat(pronunciations, '\n')
end
-- List the alternative spellings (P11577) of a lexeme as '* '-bulleted,
-- linked lemmata, one per line.
function getAlternativeSpellings( current_lexeme )
	local bullets = {}
	for i, stmt in pairs(current_lexeme:getAllStatements('P11577')) do -- alternative spelling
		local dv = stmt.mainsnak.datavalue
		if dv ~= nil then
			bullets[#bullets + 1] = '* ' .. getLinkedLemmata(getEntity(dv.value.id))
		end
	end
	return table.concat(bullets, '\n')
end
-- Wrap text in a MediaWiki heading of the given level,
-- e.g. heading_level('x', 4) -> "==== x ====".
local function heading_level(text, level)
	local marker = ('='):rep(level)
	return ('%s %s %s'):format(marker, text, marker)
end
-- Collect args[key] for every key in keys that is present, preserving the
-- order of keys.  (sections is accepted for signature symmetry with
-- add_any_notes but is not used.)
function get_any_notes(sections, args, keys)
	local collected = {}
	for _, key in ipairs(keys) do
		local value = args[key]
		if value ~= nil then
			collected[#collected + 1] = value
		end
	end
	return collected
end
-- Append every note to sections, in order.
function add_specific_notes(sections, notes)
	for _, note in ipairs(notes) do
		sections[#sections + 1] = note
	end
end
-- Append args[key] to sections for every key in keys that is present in args.
function add_any_notes(sections, args, keys)
	for _, key in ipairs(keys) do
		local value = args[key]
		if value ~= nil then
			sections[#sections + 1] = value
		end
	end
end
-- This calls frame:preprocess() instead of :callParserFunction() because the
-- latter does not work for Wikifunctions function calls yet (see
-- https://www.wikifunctions.org/wiki/Wikifunctions:Embedded_function_calls).
-- `args` is the raw argument string placed inside {{#function:...}}.
local function callWikifunctionsFunction(args, frame)
	local wikitext = '{{#function:' .. args .. '}}'
	return frame:preprocess(wikitext)
end
-- Build a generic inflection wikitable from the module-level `forms` list:
-- one row per form showing its representation(s), grammatical feature
-- labels and, when any form has one, an image (P7407).  The table starts
-- collapsed unless at least one form has an image.
local function buildLanguageAgnosticInflectionTable()
local has_image = false
local form_images = {}
-- First pass: remember the first image statement of each form.
for i, form in ipairs(forms) do
local form_image = form:getAllStatements('P7407')
if next(form_image) ~= nil then
form_images[i] = form_image[1].mainsnak.datavalue.value
has_image = true
end
end
local table_class = "wikitable mw-collapsible sortable"
if not has_image then
table_class = table_class .. " mw-collapsed"
end
-- Header row: form / grammatical features (/ image).
local text = "{| class='" .. table_class .. "' style='border:solid 1px rgb(80%,80%,100%); text-align:center;'\n"
text = text .. "|+ " .. i18n['heading_inflection_table'] .. "\n"
text = text .. "|- \n"
text = text .. "! " .. i18n['heading_form'] .. " !! " .. i18n['heading_grammatical_features']
if has_image then
text = text .. " !! " .. (i18n['heading_image'])
end
text = text .. " \n"
-- Second pass: one table row per form.
for i, form in ipairs(forms) do
local rep = form:getRepresentations()
local feat = form:getGrammaticalFeatures()
-- Join multiple representations with " / ".
local rep_text = ""
for j, r in pairs(rep) do
if rep_text == "" then
rep_text = r[1]
else
rep_text = rep_text .. " / " .. r[1]
end
end
-- Join feature labels (from Wikidata) with ", ".
local feat_text = ""
if feat ~= nil then
for j, f in ipairs(feat) do
local label = wb.getLabel(f)
if feat_text == "" then
feat_text = label
else
feat_text = feat_text .. ", " .. label
end
end
end
-- Empty cells render as an em dash.
text = text .. "|-\n"
text = text .. "| " .. (rep_text ~= "" and rep_text or "—")
text = text .. " || " .. (feat_text ~= "" and feat_text or "—")
if has_image then
local image_cell = "—"
if form_images[i] ~= nil then
-- "চিত্র" is the localized "File:" namespace prefix.
image_cell = "[[চিত্র:" .. form_images[i] .. "|50px]]"
end
text = text .. " || " .. image_cell
end
text = text .. "\n"
end
text = text .. "|}"
return text
end
-- Entry point: render the complete dictionary entry for a lexeme.
-- args[1]: lexeme id (e.g. "L1234"); args[2]: "false"/"0"/"না" suppresses the
-- top-level "== language ==" heading.  Builds a list of wikitext sections
-- (heading, category, etymology, pronunciation, alternative spellings,
-- part-of-speech headword template, meanings, translations, inflection
-- table, references, external links) and joins them with blank lines.
function p.all( frame )
local args = getArgs(frame)
local lexeme_id = args[1]
local current_lexeme = getEntity(lexeme_id)
local current_language = current_lexeme:getLanguage()
local senses = current_lexeme:getSenses()
local add_heading = true
-- Deliberately global: other functions in this module read `forms`.
forms = current_lexeme:getForms()
if args[2] ~= nil then
local val = mw.text.trim(tostring(args[2]))
if val == "false" or val == "0" or val == "না" then
add_heading = false
end
end
local references_seen = {}
local sections = {}
local lang_name = wb.getLabel(current_language)
if add_heading == true then
local lang_heading = "== " .. lang_name .. " =="
table.insert(sections, lang_heading)
end
local lex_cat = current_lexeme:getLexicalCategory()
lang_code = getLexemeLanguageCode(current_lexeme) -- This should remain available to all functions.
local title = mw.title.getCurrentTitle().text
local lang_category = getLanguageForCategories(current_language, title)
local cat = getCategory( lang_category, lex_cat )
local lex_cat_template
if cat ~= nil then
-- Part-of-speech heading (with an anchor) and the headword template.
table.insert(sections, '===' .. cat .. frame:expandTemplate{
title = i18n['template_anchor'],
args = { lexeme_id }
} .. '===')
table.insert(sections, frame:expandTemplate{
title= i18n['template_lexeme'],
args = {lexeme_id}
})
add_any_notes(sections, args, i18n['manual_category'])
local etymology = getEtymology ( current_lexeme )
if etymology ~= '' and etymology ~= nil then
table.insert(sections, heading_level(i18n['heading_etymology'], 4))
table.insert(sections, tostring(etymology))
end
add_any_notes(sections, args, i18n['manual_etymology'])
local pronunciation = getPronunciation( frame, current_lexeme, lang_name, lex_cat )
if pronunciation ~= '' then
table.insert(sections, heading_level(i18n['heading_pronunciation'], 4))
table.insert(sections, tostring(pronunciation))
end
add_any_notes(sections, args, i18n['manual_pronunciation'])
local alternative_spellings = getAlternativeSpellings( current_lexeme )
if alternative_spellings ~= '' then
table.insert(sections, heading_level(i18n['heading_alternative_spellings'], 4))
table.insert(sections, alternative_spellings)
end
-- Per-language headword templates, tried by English then Bengali name.
if lang_code ~= nil then
if lex_cat == 'Q34698' then -- adjective
lex_cat_template = safeExpand(frame, lang_code .. '-adj')
if not lex_cat_template then
lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষণ')
end
elseif lex_cat == 'Q1084' then -- noun
local gender
local stmts = current_lexeme:getAllStatements('P5185') -- grammatical gender
if #stmts ~= 0 then
local gender_qid = stmts[1].mainsnak.datavalue.value.id
if gender_qid == 'Q499327' then -- @TODO: Add checks for every possible circumstance
gender = 'm'
elseif gender_qid == 'Q1775415' then
gender = 'f'
end
end
-- The following checks are ordered based on which one is expected to be true in a higher number of cases.
if current_language == 'Q13955' then -- Arabic: match a lemma to the page title, diacritics ignored
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
lex_cat_template = frame:expandTemplate{title='ar-noun', args={matched_lemma,gender}}
else
lex_cat_template = frame:expandTemplate{title='ar-noun', args={nil,gender}}
end
elseif current_language == 'Q29919' then -- Egyptian Arabic
lex_cat_template = frame:expandTemplate{title='arz-noun', args={g=gender}}
elseif current_language == 'Q397' then -- Latin
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
lex_cat_template = frame:expandTemplate{title='la-noun', args={matched_lemma,g=gender}}
end
elseif current_language == 'Q11059' then -- Sanskrit
lex_cat_template = frame:expandTemplate{title='sa-noun', args={g=gender}}
elseif current_language ~= 'Q1860' then -- These templates require the gender to be passed as the 1st argument.
lex_cat_template = safeExpand(frame, lang_code .. '-noun', {gender})
if not lex_cat_template then
lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষ্য', {gender})
end
end
end
end
-- elseif lex_cat == 'Q147276' then
-- lex_cat_template = safeExpand(frame, lang_code .. '-proper noun', {gender})
-- if not lex_cat_template then
-- lex_cat_template = safeExpand(frame, lang_code .. '-নামবাচক বিশেষ্য', {gender})
-- end
end
if lex_cat_template ~= nil then
table.insert(sections, lex_cat_template)
end
local meanings, references_seen = unpack(getMeanings( frame, args, current_lexeme, senses, references_seen, lang_name))
-- Without a headword template, show the lemma itself as a heading (or a
-- maintenance category when no lemma matches the page title).
if lex_cat_template == nil then
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
table.insert(sections, heading_level(matched_lemma, 4))
else
table.insert(sections, '[[Category:যেসব ভুক্তিতে লেমার হেডিং দেখানো অসম্ভব]]')
end
end
table.insert(sections, tostring(meanings))
add_any_notes(sections, args, i18n['manual_meaning'])
local instance_of = current_lexeme:getBestStatements('P31') -- instance of
if #instance_of ~= 0 then
local instance_of_entity = instance_of[1].mainsnak.datavalue.value.id
if instance_of_entity == 'Q40437546' or instance_of_entity == 'Q120831827' or instance_of_entity == 'Q120717979' then -- @TODO: generalise this so all types of roots are shown
local instance_of_value = '#' .. i18n['text_instance_of'] .. ' ' .. wb.getLabel(instance_of_entity)
table.insert(sections, instance_of_value)
end
end
local translations = getTranslations(frame, senses)
if translations ~= nil then
table.insert(sections, translations)
end
-- (!) If a language has a special inflection table, add a new if-statement
-- for it here.
if next(forms) ~= nil then
if current_language == 'Q9610' then -- Bengali
local conjTable = require('মডিউল:আভিধানিক উপাত্ত/Q9610').getInflectionTable(frame, current_lexeme)
table.insert(sections, conjTable) -- Bengali adjectives do not need any table thanks to the bn-adj template.
--elseif current_language == 'Q13955' then -- Arabic
-- if lex_cat == 'Q1084' then
-- table.insert(sections, frame:expandTemplate{title='ar-decl-noun', args={lemma}})
-- end
--elseif current_language == 'Q188' then -- German
-- if lex_cat == 'Q1084' then
-- table.insert(sections, callWikifunctionsFunction('Z28602|' .. lexeme_id .. '|', frame)) -- German noun declension table, enable once T422299 is resolved
-- end
else
if current_language ~= 'Q1860' then -- English
table.insert(sections, buildLanguageAgnosticInflectionTable())
end
end
end
-- References section: only shown when references or manual notes exist.
local reference_notes = get_any_notes(sections, args, i18n['manual_reference'])
if #references_seen > 0 or #reference_notes > 0 then
table.insert(sections, heading_level(i18n['heading_references'], 4))
table.insert(sections, frame:extensionTag('references'))
add_specific_notes(sections, reference_notes)
end
local external_link_table = getExternalLinks ( current_lexeme )
if #external_link_table > 0 then
local external_links = '* ' .. table.concat(external_link_table, '\n* ')
table.insert(sections, heading_level(i18n['heading_external_links'], 4))
table.insert(sections, external_links)
end
add_any_notes(sections, args, i18n['manual_external_link'])
-- Track entries with no references or links at all.
if #references_seen == 0 and #reference_notes == 0 and #external_link_table == 0 and #get_any_notes(sections, args, i18n['manual_external_link']) == 0 then
if i18n['category_rfref'][lang_category] ~= nil then
table.insert(sections, '[[Category:' .. i18n['category_rfref'][lang_category] .. ']]')
else
table.insert(sections, '[[Category:' .. i18n['category_rfref']['_'] .. ']]')
end
end
return table.concat(sections,"\n\n")
end
return p
rmvncsjbzxaqk15atqq0i30kki4iwal
507783
507782
2026-04-14T06:31:30Z
Redmin
6857
+ক্লীব লিঙ্গ
507783
Scribunto
text/plain
local p = {}
local i18n = require('মডিউল:আভিধানিক উপাত্ত/i18n')
local references = require('মডিউল:উইকিউপাত্ত তথ্যসূত্র বিন্যাসকরণ').format
local getArgs = require('Module:Arguments').getArgs
local formatter_urls = require('মডিউল:আভিধানিক উপাত্ত/urls').formatter_urls
local wb = mw.wikibase
local ustring = mw.ustring
local html = mw.html
local mw_lang = mw.language
local entity_cache = {}
local reference_cache = {}
local forms
local lang_code
-- Count whitespace-separated words in a string (Unicode-aware).
local function countWords(text)
	local total = 0
	for _ in ustring.gmatch(text, "%S+") do
		total = total + 1
	end
	return total
end
-- Strip Arabic diacritics (the harakat/tanween range and the superscript
-- alef) and collapse hamza/madda alef variants to a bare alef, so lemmas can
-- be compared against page titles.
local function normalizeLemmas(text)
	local replacements = {
		{ "[ً-ٟ]", "" },
		{ "ٰ", "" },
		{ "أ", "ا" },
		{ "إ", "ا" },
		{ "آ", "ا" },
	}
	for _, rule in ipairs(replacements) do
		text = mw.ustring.gsub(text, rule[1], rule[2])
	end
	return text
end
-- Serialize a Lua value to a string (adapted from https://stackoverflow.com/a/6081639).
-- This module uses the output to fingerprint synthesised reference objects
-- (hashed with sha3-512), so table keys are visited in a deterministic,
-- sorted order: with raw pairs() two identical tables could serialize -- and
-- therefore hash -- differently.
-- @param val           the value to serialize
-- @param name          optional key prefixed as "name = "
-- @param skipnewlines  when true, emit everything on one line
-- @param depth         indentation depth (internal; defaults to 0)
local function serializeTable(val, name, skipnewlines, depth)
	skipnewlines = skipnewlines or false
	depth = depth or 0
	local tmp = string.rep(" ", depth)
	if name then tmp = tmp .. name .. " = " end
	if type(val) == "table" then
		tmp = tmp .. "{" .. (not skipnewlines and "\n" or "")
		-- FIX: iterate keys in a sorted, type-stable order instead of raw
		-- pairs(), making the serialization deterministic.
		local keys = {}
		for k in pairs(val) do
			keys[#keys + 1] = k
		end
		table.sort(keys, function(a, b)
			local ta, tb = type(a), type(b)
			if ta ~= tb then
				return ta < tb
			end
			if ta == "number" then
				return a < b
			end
			return tostring(a) < tostring(b)
		end)
		for _, k in ipairs(keys) do
			tmp = tmp .. serializeTable(val[k], k, skipnewlines, depth + 1) .. "," .. (not skipnewlines and "\n" or "")
		end
		tmp = tmp .. string.rep(" ", depth) .. "}"
	elseif type(val) == "number" then
		tmp = tmp .. tostring(val)
	elseif type(val) == "string" then
		tmp = tmp .. string.format("%q", val)
	elseif type(val) == "boolean" then
		tmp = tmp .. (val and "true" or "false")
	else
		tmp = tmp .. "\"[inserializeable datatype:" .. type(val) .. "]\""
	end
	return tmp
end
-- Use this to safely expand templates when you are not sure that they exist.
-- Returns nil when the template is missing.  expandTemplate() doesn't seem
-- to throw any error that can be handled with pcall(), so searching the
-- result text for the standard "does not exist" marker is the only viable
-- option.
local function safeExpand(frame, title, args)
	local _, expanded = pcall(function()
		return frame:expandTemplate{ title = title, args = args }
	end)
	if expanded:find('does not exist') then
		return nil
	end
	return expanded
end
-- Format one reference object, memoized by its hash.
-- Returns { out_id, text }: out_id is nil the first time a hash is seen
-- (caller emits a fresh named <ref>) and the hash itself on repeats (caller
-- re-uses the named <ref>).
local function getReference( id, reference )
	if reference_cache[id] ~= nil then
		return {id, reference_cache[id]}
	end
	local ref_text = references(reference, wb, mw_lang, i18n['content_lang_code'], i18n['wikipedia'])
	local url_value
	local snaks = reference.snaks
	if snaks ~= nil then
		local stated_in = snaks['P248'] -- stated in
		if stated_in ~= nil then
			for _, snak in pairs(stated_in) do
				if snak.datavalue and snak.datavalue.value.id == 'Q428' then -- the Quran
					-- For Quran citations, swap the "section named" wording
					-- for "verse number" wording.
					ref_text = ustring.gsub(ref_text, 'নামক অনুচ্ছেদ', 'নং আয়াত')
					break
				end
			end
		end
		local ref_urls = snaks['P854'] -- reference URL
		if ref_urls ~= nil then
			local first = ref_urls[1]
			if first ~= nil and first.datavalue then
				url_value = first.datavalue.value
			end
		end
	end
	if url_value ~= nil then
		ref_text = ref_text .. ', [' .. url_value .. ' সংযোগ]'
	end
	reference_cache[id] = ref_text
	return {nil, ref_text}
end
-- Fetch a Wikibase entity by id, memoizing results for this render.
-- A cached value of `false` (defensive sentinel) is reported as nil.
local function getEntity( id )
	local cached = entity_cache[id]
	if cached == nil then
		cached = wb.getEntity(id)
		entity_cache[id] = cached
	end
	if cached == false then
		return nil
	end
	return cached
end
-- Determine the language code of a lexeme from its language item, trying the
-- IETF language tag (P305), the Wikimedia language code (P424) and the
-- ISO 639-3 code (P220) in that order.  Returns nil when none is found.
local function getLexemeLanguageCode(current_lexeme)
	local lang_item_id = current_lexeme:getLanguage()
	if lang_item_id == nil then
		return nil
	end
	local lang_entity = getEntity(lang_item_id)
	if lang_entity == nil then
		return nil
	end
	for _, code_property in ipairs({'P305','P424', 'P220'}) do
		local best = lang_entity:getBestStatements(code_property)
		local first = best[1]
		if first ~= nil then
			return first.mainsnak.datavalue.value
		end
	end
	return nil
end
-- Return the first form of the lexeme which has exactly the given grammatical feature.
local function formWithSingleGrammaticalFeature( item_id )
	for _, form in ipairs(forms) do
		local features = form:getGrammaticalFeatures()
		if #features == 1 and features[1] == item_id then
			return form
		end
	end
	return nil
end
-- Expand the Wikipedia-link template for an item when it has a sitelink on
-- the configured Wikipedia; otherwise return ''.
local function getArticleLinkTemplate(frame, stmt_value)
	local sitelink = getEntity(stmt_value):getSitelink(i18n['wikipedia'])
	if sitelink == nil then
		return ''
	end
	return frame:expandTemplate{
		title=i18n['template_wikipedia'],
		args={sitelink}
	}
end
-- Concatenate Wikipedia-link templates for "item for this sense" (P5137)
-- and "predicate for this sense" (P9970) statements of a sense.
local function getArticleLinks (frame, sense )
	local rendered = {}
	for _, property_id in ipairs({'P5137', 'P9970'}) do
		for i, stmt in pairs(sense:getAllStatements(property_id)) do
			rendered[#rendered + 1] = getArticleLinkTemplate(frame, stmt.mainsnak.datavalue.value.id)
		end
	end
	return table.concat(rendered)
end
-- @TODO: Generalise
-- Expand `template` with lang_code followed by the lemma of every lexeme
-- referenced by `property` on `object`.  Returns '' when lang_code is
-- unknown or there are no statements.  A lexeme lacking a lemma in
-- lang_code simply contributes no argument (same hole behaviour as before).
local function expandTemplateForProperty(frame, object, property, template)
	local statement_count = 0
	local lemmas = {}
	for _, stmt in pairs(object:getAllStatements(property)) do
		local lexeme_id = wb.lexeme.splitLexemeId(stmt.mainsnak.datavalue.value.id)
		statement_count = statement_count + 1
		-- May be nil when the lexeme has no lemma in lang_code.
		lemmas[statement_count] = getEntity(lexeme_id):getLemma(lang_code)
	end
	if not lang_code or statement_count == 0 then
		return ''
	end
	-- Build args: first lang_code, then lemmas
	local args = {lang_code}
	for i = 1, statement_count do
		args[#args + 1] = lemmas[i]
	end
	return frame:expandTemplate{
		title = template,
		args = args
	}
end
-- T418639: collect formatted external-identifier links for an entity.
-- For every claim whose property has a known formatter URL, produce
-- "[url label]", labelling the link with the property's declared source
-- (P9073) label when available, otherwise the property's own label or id.
local function getExternalLinks( entity )
	local external_links = {}
	if entity.claims == nil then return external_links end
	for property_id, statements in pairs(entity.claims) do
		local formatter_url = formatter_urls[property_id]
		if formatter_url ~= nil then
			local source_name
			local property_source = wb.getBestStatements(property_id, 'P9073')
			if next(property_source) ~= nil then
				local source_id = property_source[1].mainsnak.datavalue.value.id
				source_name = wb.getLabel(source_id) or source_id
			else
				source_name = wb.getLabel(property_id) or property_id
			end
			for _, stmt in ipairs(statements) do
				if stmt.mainsnak.datavalue then
					-- Substitute the id into the formatter URL, then make the
					-- result URL-safe by replacing spaces with '+'.
					local link = ustring.gsub(formatter_url, '$1', stmt.mainsnak.datavalue.value)
					link = ustring.gsub(link, ' ', '+')
					table.insert(external_links, '[' .. link .. ' ' .. source_name .. ']')
				end
			end
		end
	end
	return external_links
end
p.getExternalLinks = getExternalLinks -- রেখে দিন যাতে ডিবাগিং সম্ভব হয়
-- Wrap a {text, lang} pair in a <span> carrying its language code and text
-- directionality.
local function termSpan( term )
	local text, lang = term[1], term[2]
	local span = html.create( 'span' )
		:attr( 'lang', lang )
		:attr( 'dir', mw_lang.new( lang ):getDir() )
		:wikitext( text )
	return tostring( span )
end
-- Like termSpan, but the text becomes a wikilink ([[text]]) inside the span.
local function termLink( term )
	local text, lang = term[1], term[2]
	local span = html.create( 'span' )
		:attr( 'lang', lang )
		:attr( 'dir', mw_lang.new( lang ):getDir() )
		:wikitext( '[[' .. text .. ']]' )
	return tostring( span )
end
-- Join every lemma representation of a lexeme with '/' as plain (unlinked)
-- language-tagged spans.
local function getLemmata( current_lexeme )
	local rendered = {}
	for i, rep in pairs(current_lexeme:getLemmas()) do
		rendered[#rendered + 1] = termSpan(rep)
	end
	return table.concat(rendered, '/')
end
-- Join every lemma representation of a lexeme with '/' as wikilinked,
-- language-tagged spans.
local function getLinkedLemmata( current_lexeme )
	local rendered = {}
	for i, rep in pairs(current_lexeme:getLemmas()) do
		rendered[#rendered + 1] = termLink(rep)
	end
	return table.concat(rendered, '/')
end
-- Collect usage examples (P5831) for one sense, both from the lexeme's own
-- statements (filtered by the "demonstrates sense" qualifier P6072) and from
-- statements attached directly to the sense.  The example word is bolded in
-- the example text when it can be located; formatted references are
-- appended below each example.  Returns { rendered <dl> string, references_seen }.
local function getExamples( current_lexeme, sense_id, references_seen )
	local examples = html.create('dl')
	-- Append one example (and its references, if any) to the <dl>.
	local function render(example_str, reference_text)
		if example_str ~= nil then
			examples:tag('dd'):wikitext("''" .. example_str .. "''")
			if reference_text ~= '' then
				examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
			end
		end
	end
	-- Format and record the references of a statement.
	local function referencesText(stmt)
		local reference_text = ''
		if stmt.references ~= nil then
			for j, reference in pairs(stmt.references) do
				table.insert(references_seen, reference.hash)
				local got_reference = getReference(reference.hash, reference)
				reference_text = reference_text .. '\n\n' .. got_reference[2]
			end
		end
		return reference_text
	end
	-- Examples stated on the lexeme, qualified with the sense they demonstrate.
	for i, stmt in pairs(current_lexeme:getAllStatements('P5831')) do -- usage example
		if stmt.qualifiers ~= nil and stmt.qualifiers['P6072'] ~= nil and stmt.qualifiers['P6072'][1].datavalue.value.id == sense_id then -- demonstrates sense
			-- FIX: these were all accidental globals; now local.
			local example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ','<br/>')
			local example_lang = stmt.mainsnak.datavalue.value.language
			local example_form_strs = {}
			if stmt.qualifiers['P1810'] ~= nil then -- subject named as
				table.insert(example_form_strs, stmt.qualifiers['P1810'][1].datavalue.value)
			elseif stmt.qualifiers['P5830'] ~= nil then -- demonstrates form
				local example_form = getEntity(stmt.qualifiers['P5830'][1].datavalue.value.id)
				for i, rep in pairs(example_form:getRepresentations()) do
					table.insert(example_form_strs, rep[1])
				end
			end
			local example_str = nil
			for i, example_form_str in pairs(example_form_strs) do
				-- NOTE(review): example_form_str is used as a Lua pattern;
				-- forms containing magic characters ('-', '%', ...) may
				-- match incorrectly -- confirm whether escaping is needed.
				local new_example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
				if new_example_text ~= example_text then
					example_str = termSpan({new_example_text, example_lang})
					break
				end
			end
			if example_str == nil then
				example_str = termSpan({example_text, example_lang})
			end
			render(example_str, referencesText(stmt))
		end
	end
	-- Examples stated directly on the sense itself.
	for i, stmt in pairs(wb.getAllStatements(sense_id, 'P5831')) do -- usage example
		local example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ','<br/>')
		local example_lang = stmt.mainsnak.datavalue.value.language
		-- FIX: guard against missing qualifiers; a statement without a P5830
		-- ("demonstrates form") qualifier previously raised an error here.
		local qualifiers = stmt.qualifiers or {}
		local example_form = nil
		if qualifiers['P5830'] ~= nil then
			example_form = getEntity(qualifiers['P5830'][1].datavalue.value.id)
		end
		local example_form_str = nil
		if qualifiers['P1810'] ~= nil then -- subject named as
			example_form_str = qualifiers['P1810'][1].datavalue.value
		end
		if example_form_str == nil and example_form ~= nil then
			example_form_str = example_form:getRepresentation(i18n['content_lang_code'])
		end
		if example_form_str == nil and example_form ~= nil then
			example_form_str = example_form:getRepresentations()[1][1]
		end
		if example_form_str ~= nil then
			example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
		end
		local example_str = termSpan({example_text, example_lang})
		render(example_str, referencesText(stmt))
	end
	return { tostring(examples) , references_seen }
end
-- Returns ustring.find's results (truthy match position) when `title`
-- contains any code point in [start_point, end_point]; nil otherwise.
local function checkTitleCodePointInRange(title, start_point, end_point)
	local class = '[' .. ustring.char(start_point) .. '-' .. ustring.char(end_point) .. ']'
	return ustring.find(title, class)
end
-- Maps a language item ID to the key used for category lookups,
-- disambiguating script-ambiguous languages (Hindi/Urdu, Punjabi,
-- New Persian) by which Unicode block the page title is written in.
-- Unambiguous languages are returned unchanged.
local function getLanguageForCategories( lang_id, current_page_title )
	-- For each ambiguous language: ordered list of
	-- { first code point, last code point, suffix appended to the ID }.
	local script_overrides = {
		['Q11051'] = { -- Hindi/Urdu
			{ 0x0600, 0x06ff, 'ur' }, -- Arabic script: Urdu
			{ 0x0900, 0x097f, 'hi' }, -- Devanagari: Hindi
		},
		['Q58635'] = { -- Punjabi
			{ 0x0600, 0x06ff, 'pnb' }, -- Shahmukhi
			{ 0x0a00, 0x0a7f, 'pa' }, -- Gurmukhi
		},
		['Q56356571'] = { -- New Persian
			{ 0x0600, 0x06ff, 'fa' }, -- Arabic script: Persian (Iran/Afghanistan)
			{ 0x0400, 0x04ff, 'tg' }, -- Cyrillic: Tajik
		},
	}
	local overrides = script_overrides[lang_id]
	if overrides ~= nil then
		for _, entry in ipairs(overrides) do
			if checkTitleCodePointInRange(current_page_title, entry[1], entry[2]) ~= nil then
				return lang_id .. entry[3]
			end
		end
	end
	return lang_id
end
-- Returns the mainsnak value of the first statement for `property` on
-- `object`, or nil when the object has no such statements.
local function getOneStringForProperty(object, property)
	local stmts = object:getAllStatements(property)
	if #stmts == 0 then
		return nil
	end
	return stmts[1].mainsnak.datavalue.value
end
-- Builds a "Translations" section from the P5972 (translation) statements of
-- the given senses, or returns nil when there is nothing to show.
-- TODO: woefully incomplete until T185313 and T199887 are resolved.
-- Fix: `gloss` was an accidental global; it is now a local. It still holds
-- the Bangla gloss of the last sense that produced a translation, which is
-- what gets passed to the trans-top template header (original behavior).
local function getTranslations(frame, senses)
	if #senses == 0 then
		return nil
	end
	local translation_set = {}
	local gloss
	for _, sense in pairs(senses) do
		for _, stmt in pairs(sense:getAllStatements('P5972')) do -- translation
			local translation = stmt.mainsnak.datavalue.value.id
			local lexeme_id = wb.lexeme.splitLexemeId(translation)
			local language = wb.getLabel(getEntity(lexeme_id):getLanguage())
			gloss = sense:getGloss('bn')
			table.insert(translation_set, language .. ': ' .. getLinkedLemmata(getEntity(lexeme_id)) .. '<br/>')
		end
	end
	if #translation_set == 0 then
		return nil
	end
	local translations = frame:expandTemplate{title = i18n['template_trans-top'], args={gloss}}
	translations = '====' .. i18n['heading_translation'] .. '==== \n' .. translations .. table.concat(translation_set, '\n') .. frame:expandTemplate{title = i18n['template_trans-bottom']}
	return translations
end
-- Returns the label of the lexical-category item followed by a [[Category:…]]
-- tag chosen from i18n['lang_categories']: the per-language mapping for
-- cat_id, its '_' default, or a maintenance category when the language has
-- no mapping at all.
local function getCategory( lang_category, cat_id )
	local label = wb.getLabel( cat_id )
	local lang_cat = i18n['lang_categories'][lang_category]
	local category_name
	if lang_cat == nil then
		category_name = i18n['maintenance_no_lang_category_found']
	elseif lang_cat[cat_id] ~= nil then
		category_name = lang_cat[cat_id]
	else
		category_name = lang_cat['_']
	end
	return label .. '[[Category:' .. category_name .. ']]'
end
-- Builds the small pencil icon linking to the entity's Wikidata page
-- (optionally with ?uselang= and a #property fragment) shown next to
-- editable values.
local createicon = function(langcode, entityID, propertyID)
	local lc = langcode or ""
	local pid = propertyID or ""
	local pieces = {
		" <span class='penicon autoconfirmed-show'>[[",
		-- " <span data-bridge-edit-flow='overwrite' class='penicon'>[[" -> enable Wikidata Bridge
		"File:OOjs UI icon edit-ltr-progressive.svg |frameless |text-top |10px |alt=",
		i18n['edit_wikidata'],
		"|link=https://www.wikidata.org/entity/",
		entityID,
	}
	if lc ~= "" then
		pieces[#pieces + 1] = "?uselang=" .. lc
	end
	if pid ~= "" then
		pieces[#pieces + 1] = "#" .. pid
	end
	pieces[#pieces + 1] = "|" .. i18n['edit_wikidata'] .. "]]</span>"
	return table.concat(pieces)
end
-- Renders the ordered list (<ol>) of sense definitions for a lexeme, with
-- per-sense extras: usage specifiers, synonym/antonym/hypernym templates,
-- gloss quotations and sources as <ref> tags, an image, external IDs,
-- sitelinked articles, and manually supplied notes from `args`.
-- Returns { html_ol, references_seen } where references_seen accumulates the
-- hashes of every rendered reference.
local function getMeanings( frame, args, current_lexeme, senses, references_seen, language_name)
if #senses == 0 then
-- No senses at all: emit an edit pencil plus a "definition requested" note and category.
return {createicon(i18n['content_lang_code'], current_lexeme:getId()) .. "''" .. i18n['text_category_rfdef'] .. "''" .. '[[Category:' .. i18n['category_rfdef'] .. ']]', references_seen}
end
local meanings = html.create( 'ol' )
for i, sense in pairs(senses) do
local gloss_text_parts = {}
-- Anchor so other entries can deep-link to this sense ID.
local main_gloss_text = frame:expandTemplate{
title=i18n['template_anchor'],
args={sense:getId()}
}
local specifiers = {}
for k, property_id in ipairs({'P6084', 'P6191', 'P9488'}) do -- location where the sense is used, language style, field of usage
for i, stmt in pairs(sense:getAllStatements(property_id)) do
local stmt_value = stmt.mainsnak.datavalue.value.id
table.insert(specifiers, wb.getLabel(stmt_value, i18n['content_lang_code']))
end
end
if #specifiers > 0 then
main_gloss_text = main_gloss_text .. "(''" .. table.concat(specifiers, "'', ''") .. "'') "
end
local gloss = sense:getGloss( i18n['content_lang_code'] )
if gloss ~= nil then
-- Single-word glosses become a wikilink to that word's own entry section.
if countWords(gloss) == 1 then
main_gloss_text = main_gloss_text .. "[[" .. gloss .. "#" .. i18n['content_lang_name'] .. "|" .. gloss .. "]]"
else
main_gloss_text = main_gloss_text .. gloss
end
if gloss:match('^প্রদত্ত%s*(%S-)%s*নাম$') then -- given names
main_gloss_text = main_gloss_text .. '[[' .. 'Category:' .. language_name .. ' ' .. i18n['category_given_names'] .. ']]'
end
else
-- No content-language gloss. Fall back, in order, to: (1) linked labels
-- of "item for this sense" (P5137), (2) a gloss in a fallback language,
-- (3) the first gloss in any language.
local other_gloss_text = nil
local other_gloss_lang = nil
local item_label_gloss_parts = {}
for k, stmt in pairs(sense:getAllStatements('P5137')) do -- if the "item for this sense" value has a Bangla label
local stmt_value = stmt.mainsnak.datavalue.value.id
local stmt_label = wb.getLabel(stmt_value)
if stmt_label ~= nil then
table.insert(item_label_gloss_parts, '[[:d:' .. stmt_value .. '|' .. stmt_label .. ']]')
end
end
if #item_label_gloss_parts > 0 then
other_gloss_text = table.concat(item_label_gloss_parts, '; ')
end
if other_gloss_text == nil then
for i, fallback_lang in ipairs(mw_lang.getFallbacksFor( i18n['content_lang_code'] )) do
-- NOTE(review): getGloss is assumed to return (text, language) here — confirm.
if sense:getGloss( fallback_lang ) ~= nil then
other_gloss_text, other_gloss_lang = sense:getGloss( fallback_lang )
end
end
if other_gloss_lang == nil then
local glosses = sense:getGlosses()
-- Take an arbitrary first gloss; entries appear to be {text, lang} pairs.
for j, gloss in pairs(glosses) do
other_gloss_text = gloss[1]
other_gloss_lang = gloss[2]
break
end
end
-- NOTE(review): if the sense has no glosses in any language,
-- other_gloss_text is still nil here and this concatenation errors.
main_gloss_text = main_gloss_text .. other_gloss_text .. "<sup><em>" .. mw_lang.fetchLanguageName(other_gloss_lang, i18n['content_lang_code']) .. "</em></sup>"
else
main_gloss_text = main_gloss_text .. "''" .. other_gloss_text .. "''"
end
main_gloss_text = main_gloss_text .. '[[Category:' .. i18n['category_rfdef_equivalent'] .. ']]'
end
local synonym = expandTemplateForProperty(frame, sense, 'P5973', i18n['template_synonym'])
if synonym ~= '' then
main_gloss_text = main_gloss_text .. ' <br/> ' .. synonym
end
local antonym = expandTemplateForProperty(frame, sense, 'P5974', i18n['template_antonym'])
if antonym ~= '' then
main_gloss_text = main_gloss_text .. ' <br/> ' .. antonym
end
local hypernym = expandTemplateForProperty(frame, sense, 'P6593', i18n['template_hypernym'])
if hypernym ~= '' then
main_gloss_text = main_gloss_text .. ' <br/> ' .. hypernym
end
-- NOTE(review): lex_cat is not a parameter and not assigned at module level;
-- p.all keeps its own *local* lex_cat, so this global read appears to always
-- be nil and these demonym branches never run — confirm intended.
if lex_cat == 'Q1084' or lex_cat == 'Q147276' then -- noun or proper noun
local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-noun'])
main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
elseif lex_cat == 'Q34698' then
local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-adj'])
main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
end
table.insert(gloss_text_parts, main_gloss_text .. createicon(i18n['content_lang_code'], sense:getId()))
for i, stmt in pairs(sense:getAllStatements('P8394')) do -- quotation for the gloss
-- NOTE(review): gloss_quote leaks as a global (missing `local`).
gloss_quote = termSpan({stmt.mainsnak.datavalue.value.text, stmt.mainsnak.datavalue.value.language})
if stmt.references[1] ~= nil then
local got_reference = getReference ( stmt.references[1].hash, stmt.references[1] )
gloss_quote = '"' .. gloss_quote .. '" ' .. got_reference[2]
end
-- NOTE(review): this indexes stmt.references[1] unconditionally — it will
-- error when a P8394 statement carries no references (the check above
-- guards only the previous line).
table.insert(references_seen, stmt.references[1].hash)
table.insert(gloss_text_parts, frame:extensionTag('ref', gloss_quote))
end
for i, stmt in pairs(sense:getAllStatements('P1343')) do -- described by source
-- TODO: do away with making fake reference objects
local fake_reference = { ['snaks'] = {} }
fake_reference.snaks['P248'] = { [1] = stmt.mainsnak }
-- NOTE(review): qualifiers_order leaks as a global (missing `local`).
qualifiers_order = stmt['qualifiers-order']
if qualifiers_order ~= nil then
for i, k in ipairs(qualifiers_order) do fake_reference.snaks[k] = stmt.qualifiers[k] end
end
-- Hash the serialized fake reference so it can be cached/deduplicated
-- exactly like real references.
fake_reference.hash = mw.hash.hashValue('sha3-512', serializeTable(fake_reference))
table.insert(references_seen, fake_reference.hash)
local got_reference = getReference(fake_reference.hash, fake_reference)
if got_reference[1] == nil then
table.insert(gloss_text_parts, frame:extensionTag('ref', got_reference[2], {name = fake_reference.hash}))
else
-- Reference already rendered once: reuse it by name.
table.insert(gloss_text_parts, frame:extensionTag{name = 'ref', content='', args = {name = got_reference[1]}})
end
end
-- Attach the sense's first image (P18), captioned with lemma + gloss.
local first_sense_image = ''
local sense_images = sense:getAllStatements('P18')
if next(sense_images) ~= nil then
first_sense_image = sense_images[1].mainsnak.datavalue.value
end
if first_sense_image ~= '' then
table.insert(gloss_text_parts, '[[চিত্র:' .. first_sense_image .. "|thumb|'''" .. getLemmata(current_lexeme) .. "'''—" .. main_gloss_text .. ']]')
end
-- External-ID links for this sense, rendered small in parentheses.
local idlinks = getExternalLinks(sense)
if #idlinks > 0 then
local idlinktext = '<small>('
for i, idlink in pairs(idlinks) do
idlinktext = idlinktext .. idlink .. '\n'
end
idlinktext = idlinktext .. ')</small>'
table.insert(gloss_text_parts, idlinktext)
end
local externallinks = getArticleLinks(frame, sense)
if externallinks ~= '' then
table.insert(gloss_text_parts, externallinks)
end
-- Manual notes may be keyed by the full sense ID ("L1-S1") or by the bare
-- sense part after the hyphen ("S1").
local new_notes = {}
local sense_keys = { sense:getId(), string.sub(sense:getId(), string.find(sense:getId(), '-')+1) }
for i, v in ipairs(sense_keys) do
if args[v] ~= nil then
table.insert(new_notes, args[v])
end
end
if #new_notes > 0 then
for i, v in ipairs(new_notes) do
if i == 1 then
table.insert(gloss_text_parts, '<br/>' .. v)
else
table.insert(gloss_text_parts, v)
end
end
end
local examples, references_seen = unpack(getExamples( current_lexeme, sense:getId(), references_seen ))
local gloss_text = table.concat(gloss_text_parts, '\n')
meanings:tag('li'):wikitext(gloss_text):wikitext(examples)
end
return {meanings, references_seen}
end
-- Picks which form's statements should drive the pronunciation section:
-- for Bengali, the nominative form of nouns or the verbal-noun form of
-- verbs; otherwise (or when no such form exists) the first available form.
local function getPronunciationBaseForm( lang_name, lex_cat)
	-- (!) Languages whose words use a different base form can be added here
	-- with new if statements.
	local base_form
	if lang_name == 'বাংলা' then
		if lex_cat == 'Q1084' then -- noun
			base_form = formWithSingleGrammaticalFeature( 'Q131105' ) -- nominative case
		elseif lex_cat == 'Q24905' then -- verb
			base_form = formWithSingleGrammaticalFeature( 'Q1350145' ) -- verbal noun
		end
	end
	if base_form == nil then
		-- Fall back to the first entry of the module-level forms list, if any.
		local _, first_form = next(forms)
		base_form = first_form
	end
	return base_form
end
-- Renders the "combines" (P5238) parts of a lexeme as "a + b + …", each part
-- shown as a linked lemma, optionally followed by that part's own etymology
-- in parentheses. Parts are ordered by their "series ordinal" (P1545)
-- qualifier; statements without that qualifier are ignored.
-- Returns '' when no ordered parts exist.
local function getCombines( current_lexeme )
local combines = ''
local index_mappings = {}
for i, stmt in pairs(current_lexeme:getAllStatements('P5238')) do
if stmt.qualifiers ~= nil and stmt.qualifiers['P1545'] ~= nil then -- series ordinal
local current_index = tonumber(stmt.qualifiers['P1545'][1].datavalue.value)
index_mappings[current_index] = stmt
end
end
-- NOTE(review): index_mappings is keyed by the raw ordinal, so the #/ipairs
-- below silently drop parts unless the ordinals form a gapless 1..n
-- sequence — confirm that assumption holds for the data.
if #index_mappings ~= 0 then
for i, stmt in ipairs(index_mappings) do
local part_lexeme_id = stmt.mainsnak.datavalue.value.id
local part_lexeme = getEntity(part_lexeme_id)
local current_substring = getLinkedLemmata(part_lexeme)
local part_etymology = getEtymology(part_lexeme)
if part_etymology ~= '' and part_etymology ~= nil then
current_substring = current_substring .. ' (← ' .. part_etymology .. ')'
end
if combines == '' then
combines = current_substring
else -- @TODO: This should use the 'affix' template instead.
combines = combines .. ' + ' .. current_substring
end
end
end
return combines
end
-- Returns "√" followed by the linked lemmata of the lexeme's root (P5920),
-- or '' when the lexeme has no root statement. Only the first root
-- statement is considered.
function getRoots ( current_lexeme )
	local stmts = current_lexeme:getAllStatements('P5920')
	local first = stmts[1]
	if first == nil then
		return ''
	end
	local root_lexeme = getEntity(first.mainsnak.datavalue.value.id)
	return '√' .. getLinkedLemmata(root_lexeme)
end
-- Renders the etymology of a lexeme by recursively following "derived from
-- lexeme" (P5191) statements, then appending any root (P5920) and
-- word-combination (P5238) information. A "mode of derivation" (P5886)
-- qualifier selects a wording template (borrowing / learned borrowing /
-- inheritance). Returns '' when nothing is known.
-- TODO: see if any etymology chains are not possible to render.
-- Fix: origin_lexeme_string / new_etymology_string were declared inside the
-- datavalue-nil check but read outside it, leaking globals and causing a
-- nil-concatenation error when the first statement had no datavalue. The
-- etymology accumulation now happens inside that check.
function getEtymology ( current_lexeme )
	local etymology = ''
	local current_combines = getCombines(current_lexeme)
	local current_roots = getRoots(current_lexeme)
	local stmts = current_lexeme:getAllStatements('P5191')
	if #stmts == 0 then
		if current_roots ~= '' and current_combines ~= '' then
			return current_roots .. '<br/>(' .. current_combines .. ')'
		elseif current_roots ~= '' then
			return current_roots
		else
			return current_combines
		end
	end
	for _, stmt in pairs(stmts) do
		local origin_lexeme_dv = stmt.mainsnak.datavalue -- nil when the origin lexeme is not known
		if origin_lexeme_dv ~= nil then
			local origin_lexeme = getEntity(origin_lexeme_dv.value.id)
			local origin_lexeme_lang = origin_lexeme:getLanguage()
			local origin_lexeme_string = getLinkedLemmata(origin_lexeme) .. ' (' .. wb.getLabel(origin_lexeme_lang) .. ')'
			if stmt.qualifiers ~= nil and stmt.qualifiers['P5886'] ~= nil then -- mode of derivation
				local mode_of_derivation = stmt.qualifiers['P5886'][1].datavalue.value.id
				if mode_of_derivation == 'Q1345001' then -- borrowing
					origin_lexeme_string = ustring.gsub(i18n['etymology_borrowing'], '$1', origin_lexeme_string)
				elseif mode_of_derivation == 'Q845079' then -- learned borrowing
					origin_lexeme_string = ustring.gsub(i18n['etymology_learned_borrowing'], '$1', origin_lexeme_string)
				elseif mode_of_derivation == 'Q56611986' then -- inheritance
					origin_lexeme_string = ustring.gsub(i18n['etymology_inheritance'], '$1', origin_lexeme_string)
				end
			end
			-- Recurse into the origin's own etymology to render the chain.
			local origin_origin = getEtymology(origin_lexeme)
			local new_etymology_string
			if origin_origin ~= '' and origin_origin ~= nil then
				new_etymology_string = origin_lexeme_string .. ' ← ' .. origin_origin
			else
				new_etymology_string = origin_lexeme_string
			end
			if etymology == '' then
				etymology = new_etymology_string
			else
				-- Later origins: append only the immediate origin, not its
				-- full chain (original behavior).
				etymology = etymology .. ' ' .. origin_lexeme_string
			end
		end
	end
	if current_roots ~= '' then
		etymology = etymology .. ' ' .. current_roots
	end
	if current_combines ~= '' then
		etymology = etymology .. '<br/>(' .. current_combines .. ')'
	end
	return etymology
end
-- Builds the pronunciation section for a lexeme: audio files (P443), IPA
-- transcriptions (P898) or a language-specific IPA template fallback, plus
-- ISO 15919 (P5825), ITRANS (P8881) and IAST (P7581) transliterations, all
-- taken from the base form chosen by getPronunciationBaseForm.
-- Returns the '\n'-joined list items, or '' when there is no base form.
-- Fix: both qualifier loops indexed stmt.qualifiers['P5237'] without a nil
-- check, so a statement that had qualifiers but no P5237 crashed with
-- "bad argument to pairs/ipairs"; the lookups are now guarded.
local function getPronunciation ( frame, current_lexeme, lang_name, lex_cat )
	local pronunciations = {}
	local base_form = getPronunciationBaseForm(lang_name, lex_cat )
	if base_form ~= nil then
		for i, stmt in pairs(base_form:getAllStatements('P443')) do -- pronunciation audio
			local pronunciation_file = stmt.mainsnak.datavalue.value
			local specifier_text = ''
			local specifiers = {}
			if stmt.qualifiers ~= nil then
				for k, property_id in ipairs({'P5237'}) do -- pronunciation variety
					-- Guard: the statement may carry other qualifiers but not this one.
					if stmt.qualifiers[property_id] ~= nil then
						for l, qual in pairs(stmt.qualifiers[property_id]) do
							local stmt_value = qual.datavalue.value.id
							table.insert(specifiers, wb.getLabel(stmt_value))
						end
					end
				end
			end
			if #specifiers > 0 then
				specifier_text = table.concat(specifiers, "'', ''")
			end
			local audio_text
			if specifier_text ~= '' then
				audio_text = i18n['text_audio'] .. ' (' .. specifier_text .. ')'
			else
				audio_text = i18n['text_audio']
			end
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title= i18n['template_audio'],
				args = {lang_name, pronunciation_file, audio_text}
			})
		end
		local ipa_transcription = base_form:getAllStatements('P898') -- IPA transcription
		local iso15919_transcription = getOneStringForProperty(base_form, 'P5825') -- ISO 15919 transliteration
		local itrans = getOneStringForProperty(base_form, 'P8881') -- ITRANS
		local iast = getOneStringForProperty(base_form, 'P7581') -- IAST transliteration
		-- @TODO: when both audio and a transcription exist, show them right next to each other
		if #ipa_transcription ~= 0 then
			for i, stmt in pairs(ipa_transcription) do
				local ipa_text = stmt.mainsnak.datavalue.value
				local specifier_text = ''
				local specifiers = {}
				if stmt.qualifiers ~= nil then
					for k, property_id in ipairs({'P5237'}) do -- pronunciation variety
						-- Guard: same missing-qualifier protection as above.
						if stmt.qualifiers[property_id] ~= nil then
							for l, qual in ipairs(stmt.qualifiers[property_id]) do
								table.insert(specifiers, wb.getLabel( qual.datavalue.value.id ))
							end
						end
					end
				end
				if #specifiers > 0 then
					specifier_text = "(''" .. table.concat(specifiers, "'', ''") .. "'') "
				end
				table.insert(pronunciations, '* ' .. specifier_text .. frame:expandTemplate{
					title= i18n['template_ipa'],
					args = {lang_name, ipa_text}
				})
			end
		-- The following checks are ordered based on which one is expected to be true in a higher number of cases.
		elseif lang_name == 'বাংলা' then
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='bn-IPA',
			})
		elseif lang_name == 'আরবি' then
			local lemma = current_lexeme:getLemma('ar')
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='ar-IPA',
				args={lemma}
			})
		elseif lang_name == 'ফালা' then
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='fax-pron',
			})
		elseif lang_name == 'ফিনীয়' then
			table.insert(pronunciations, '* ' .. frame:expandTemplate{
				title='fi-IPA',
			})
		end
		if iso15919_transcription ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_iso15919'] .. ': ' .. iso15919_transcription)
		end
		if itrans ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_itrans'] .. ': ' .. itrans)
		end
		if iast ~= nil then
			table.insert(pronunciations, '* ' .. i18n['text_iast'] .. ': ' .. iast)
		end
	end
	return table.concat(pronunciations, '\n')
end
-- Renders the lexeme's alternative spellings (P11577) as a '\n'-joined
-- bullet list of linked lemmata; statements with no datavalue are skipped.
-- Returns '' when there are none.
function getAlternativeSpellings( current_lexeme )
	local lines = {}
	for _, stmt in pairs(current_lexeme:getAllStatements('P11577')) do -- alternative spelling
		local dv = stmt.mainsnak.datavalue
		if dv ~= nil then
			lines[#lines + 1] = '* ' .. getLinkedLemmata(getEntity(dv.value.id))
		end
	end
	return table.concat(lines, '\n')
end
-- Wraps `text` in a MediaWiki heading of the given level,
-- e.g. heading_level('Foo', 2) -> '== Foo =='.
local function heading_level(text, level)
	local marker = ('='):rep(level)
	return marker .. ' ' .. text .. ' ' .. marker
end
-- Collects args[key] for every key in `keys` that is present, preserving
-- order. `sections` is accepted for call-site compatibility but unused.
function get_any_notes(sections, args, keys)
	local notes = {}
	for _, key in ipairs(keys) do
		local note = args[key]
		if note ~= nil then
			notes[#notes + 1] = note
		end
	end
	return notes
end
-- Appends every entry of `notes`, in order, to the `sections` list (mutates
-- `sections` in place; no return value).
function add_specific_notes(sections, notes)
	for _, note in ipairs(notes) do
		sections[#sections + 1] = note
	end
end
-- Appends args[key] to `sections` for every key in `keys` that is present in
-- `args`, preserving key order (mutates `sections` in place).
function add_any_notes(sections, args, keys)
	for _, key in ipairs(keys) do
		local note = args[key]
		if note ~= nil then
			sections[#sections + 1] = note
		end
	end
end
-- Invokes a Wikifunctions function through the {{#function:…}} parser hook.
-- Uses frame:preprocess() instead of :callParserFunction() because the
-- latter does not work for Wikifunctions function calls yet (see
-- https://www.wikifunctions.org/wiki/Wikifunctions:Embedded_function_calls).
local function callWikifunctionsFunction(args, frame)
	return frame:preprocess(string.format('{{#function:%s}}', args))
end
-- Returns the lexeme's lemma (original spelling) whose normalized form
-- (via normalizeLemmas) equals the current page title, or nil when none
-- matches.
-- BUG FIX: the original computed matched_lemma but fell off the end of the
-- function without returning it, so every caller always received nil.
local function getMatchingLemmaForPageTitle(lexeme, title)
	for _, lemma_entry in ipairs(lexeme:getLemmas()) do
		local lemma = lemma_entry[1]
		if normalizeLemmas(lemma) == title then
			return lemma
		end
	end
	return nil
end
-- Renders a generic inflection wikitable for the module-level `forms` list:
-- one row per form showing its representations, grammatical-feature labels
-- and, when any form has one, an image column (P7407).
-- The table starts collapsed unless at least one form has an image.
local function buildLanguageAgnosticInflectionTable()
local has_image = false
local form_images = {}
-- First pass: collect one image per form (if any) so we know up front
-- whether an image column is needed.
for i, form in ipairs(forms) do
local form_image = form:getAllStatements('P7407')
if next(form_image) ~= nil then
form_images[i] = form_image[1].mainsnak.datavalue.value
has_image = true
end
end
local table_class = "wikitable mw-collapsible sortable"
if not has_image then
table_class = table_class .. " mw-collapsed"
end
local text = "{| class='" .. table_class .. "' style='border:solid 1px rgb(80%,80%,100%); text-align:center;'\n"
text = text .. "|+ " .. i18n['heading_inflection_table'] .. "\n"
text = text .. "|- \n"
text = text .. "! " .. i18n['heading_form'] .. " !! " .. i18n['heading_grammatical_features']
if has_image then
text = text .. " !! " .. (i18n['heading_image'])
end
text = text .. " \n"
-- Second pass: one table row per form; empty cells render as an em dash.
for i, form in ipairs(forms) do
local rep = form:getRepresentations()
local feat = form:getGrammaticalFeatures()
local rep_text = ""
-- Multiple representations (e.g. spelling variants) are joined with " / ".
for j, r in pairs(rep) do
if rep_text == "" then
rep_text = r[1]
else
rep_text = rep_text .. " / " .. r[1]
end
end
local feat_text = ""
if feat ~= nil then
-- Grammatical features are shown as a comma-separated list of labels.
for j, f in ipairs(feat) do
local label = wb.getLabel(f)
if feat_text == "" then
feat_text = label
else
feat_text = feat_text .. ", " .. label
end
end
end
text = text .. "|-\n"
text = text .. "| " .. (rep_text ~= "" and rep_text or "—")
text = text .. " || " .. (feat_text ~= "" and feat_text or "—")
if has_image then
local image_cell = "—"
if form_images[i] ~= nil then
image_cell = "[[চিত্র:" .. form_images[i] .. "|50px]]"
end
text = text .. " || " .. image_cell
end
text = text .. "\n"
end
text = text .. "|}"
return text
end
-- Main entry point: renders a complete dictionary entry (language heading,
-- lexical-category section, etymology, pronunciation, alternative spellings,
-- headword template, meanings, translations, inflection table, references
-- and external links) for the lexeme whose ID is args[1].
-- args[2], when "false"/"0"/"না", suppresses the "== language ==" heading.
-- Other named args keyed by sense/form IDs inject manually written notes.
function p.all( frame )
local args = getArgs(frame)
local lexeme_id = args[1]
local current_lexeme = getEntity(lexeme_id)
local current_language = current_lexeme:getLanguage()
local senses = current_lexeme:getSenses()
local add_heading = true
-- Deliberately not local: helper functions read the module-level `forms`.
forms = current_lexeme:getForms()
if args[2] ~= nil then
local val = mw.text.trim(tostring(args[2]))
if val == "false" or val == "0" or val == "না" then
add_heading = false
end
end
local references_seen = {}
local sections = {}
local lang_name = wb.getLabel(current_language)
if add_heading == true then
local lang_heading = "== " .. lang_name .. " =="
table.insert(sections, lang_heading)
end
-- NOTE(review): lex_cat is local here, but some helpers (e.g. getMeanings)
-- read a bare `lex_cat` global and therefore see nil — confirm intended.
local lex_cat = current_lexeme:getLexicalCategory()
lang_code = getLexemeLanguageCode(current_lexeme) -- This should remain available to all functions.
local title = mw.title.getCurrentTitle().text
local lang_category = getLanguageForCategories(current_language, title)
local cat = getCategory( lang_category, lex_cat )
local lex_cat_template
if cat ~= nil then
-- Lexical-category heading with an anchor, then the lexeme info template.
table.insert(sections, '===' .. cat .. frame:expandTemplate{
title = i18n['template_anchor'],
args = { lexeme_id }
} .. '===')
table.insert(sections, frame:expandTemplate{
title= i18n['template_lexeme'],
args = {lexeme_id}
})
add_any_notes(sections, args, i18n['manual_category'])
local etymology = getEtymology ( current_lexeme )
if etymology ~= '' and etymology ~= nil then
table.insert(sections, heading_level(i18n['heading_etymology'], 4))
table.insert(sections, tostring(etymology))
end
add_any_notes(sections, args, i18n['manual_etymology'])
local pronunciation = getPronunciation( frame, current_lexeme, lang_name, lex_cat )
if pronunciation ~= '' then
table.insert(sections, heading_level(i18n['heading_pronunciation'], 4))
table.insert(sections, tostring(pronunciation))
end
add_any_notes(sections, args, i18n['manual_pronunciation'])
local alternative_spellings = getAlternativeSpellings( current_lexeme )
if alternative_spellings ~= '' then
table.insert(sections, heading_level(i18n['heading_alternative_spellings'], 4))
table.insert(sections, alternative_spellings)
end
-- Language-specific headword templates ({{xx-adj}}, {{xx-noun}}, …),
-- tried with safeExpand since not every language has one.
if lang_code ~= nil then
if lex_cat == 'Q34698' then -- adjective
lex_cat_template = safeExpand(frame, lang_code .. '-adj')
if not lex_cat_template then
lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষণ')
end
elseif lex_cat == 'Q1084' then -- noun
local gender
local stmts = current_lexeme:getAllStatements('P5185') -- grammatical gender
if #stmts ~= 0 then
local gender_qid = stmts[1].mainsnak.datavalue.value.id
if gender_qid == 'Q499327' then -- @TODO: Add checks for every possible circumstance
gender = 'm'
elseif gender_qid == 'Q1775415' then
gender = 'f'
elseif gender_qid == 'Q1775461' then
gender = 'n'
end
end
-- The following checks are ordered based on which one is expected to be true in a higher number of cases.
if current_language == 'Q13955' then -- ar-noun
local matched_lemma = getMatchingLemmaForPageTitle(current_lexeme, title)
if matched_lemma ~= nil then
lex_cat_template = frame:expandTemplate{title='ar-noun', args={matched_lemma,gender}}
else
lex_cat_template = frame:expandTemplate{title='ar-noun', args={nil,gender}}
end
elseif current_language == 'Q29919' then -- arz-noun
lex_cat_template = frame:expandTemplate{title='arz-noun', args={g=gender}}
elseif current_language == 'Q397' then -- la-noun
local matched_lemma = getMatchingLemmaForPageTitle(current_lexeme, title)
if matched_lemma ~= nil then
lex_cat_template = frame:expandTemplate{title='la-noun', args={matched_lemma,g=gender}}
end
elseif current_language == 'Q11059' then -- sa-noun
lex_cat_template = frame:expandTemplate{title='sa-noun', args={g=gender}}
elseif current_language ~= 'Q1860' then -- These templates require the gender to be passed as the 1st argument.
lex_cat_template = safeExpand(frame, lang_code .. '-noun', {gender})
if not lex_cat_template then
lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষ্য', {gender})
end
end
end
end
-- elseif lex_cat == 'Q147276' then
-- lex_cat_template = safeExpand(frame, lang_code .. '-proper noun', {gender})
-- if not lex_cat_template then
-- lex_cat_template = safeExpand(frame, lang_code .. '-নামবাচক বিশেষ্য', {gender})
-- end
end
if lex_cat_template ~= nil then
table.insert(sections, lex_cat_template)
end
local meanings, references_seen = unpack(getMeanings( frame, args, current_lexeme, senses, references_seen, lang_name))
-- Without a headword template, fall back to a plain heading showing the
-- lemma that matches the page title, or a maintenance category if none does.
if lex_cat_template == nil then
local lemmas = current_lexeme:getLemmas()
local matched_lemma = nil
for _, lemma_entry in ipairs(lemmas) do
local lemma = lemma_entry[1]
local clean_lemma = normalizeLemmas(lemma)
if clean_lemma == title then
matched_lemma = lemma
break
end
end
if matched_lemma ~= nil then
table.insert(sections, heading_level(matched_lemma, 4))
else
table.insert(sections, '[[Category:যেসব ভুক্তিতে লেমার হেডিং দেখানো অসম্ভব]]')
end
end
table.insert(sections, tostring(meanings))
add_any_notes(sections, args, i18n['manual_meaning'])
local instance_of = current_lexeme:getBestStatements('P31') -- instance of
if #instance_of ~= 0 then
local instance_of_entity = instance_of[1].mainsnak.datavalue.value.id
if instance_of_entity == 'Q40437546' or instance_of_entity == 'Q120831827' or instance_of_entity == 'Q120717979' then -- @TODO: generalise this so all types of roots are shown
local instance_of_value = '#' .. i18n['text_instance_of'] .. ' ' .. wb.getLabel(instance_of_entity)
table.insert(sections, instance_of_value)
end
end
local translations = getTranslations(frame, senses)
if translations ~= nil then
table.insert(sections, translations)
end
-- (!) Inflection tables for languages needing special handling can be added
-- here with new if statements.
if next(forms) ~= nil then
if current_language == 'Q9610' then -- Bengali
local conjTable = require('মডিউল:আভিধানিক উপাত্ত/Q9610').getInflectionTable(frame, current_lexeme)
table.insert(sections, conjTable) -- Bengali adjectives do not need any table thanks to the bn-adj template.
--elseif current_language == 'Q13955' then -- Arabic
-- if lex_cat == 'Q1084' then
-- table.insert(sections, frame:expandTemplate{title='ar-decl-noun', args={lemma}})
-- end
--elseif current_language == 'Q188' then -- German
-- if lex_cat == 'Q1084' then
-- table.insert(sections, callWikifunctionsFunction('Z28602|' .. lexeme_id .. '|', frame)) -- German noun declension table, enable once T422299 is resolved
-- end
else
if current_language ~= 'Q1860' then -- English
table.insert(sections, buildLanguageAgnosticInflectionTable())
end
end
end
local reference_notes = get_any_notes(sections, args, i18n['manual_reference'])
if #references_seen > 0 or #reference_notes > 0 then
table.insert(sections, heading_level(i18n['heading_references'], 4))
table.insert(sections, frame:extensionTag('references'))
add_specific_notes(sections, reference_notes)
end
local external_link_table = getExternalLinks ( current_lexeme )
if #external_link_table > 0 then
local external_links = '* ' .. table.concat(external_link_table, '\n* ')
table.insert(sections, heading_level(i18n['heading_external_links'], 4))
table.insert(sections, external_links)
end
add_any_notes(sections, args, i18n['manual_external_link'])
-- No references or links anywhere: tag the entry with a
-- "references requested" category (per-language when available).
if #references_seen == 0 and #reference_notes == 0 and #external_link_table == 0 and #get_any_notes(sections, args, i18n['manual_external_link']) == 0 then
if i18n['category_rfref'][lang_category] ~= nil then
table.insert(sections, '[[Category:' .. i18n['category_rfref'][lang_category] .. ']]')
else
table.insert(sections, '[[Category:' .. i18n['category_rfref']['_'] .. ']]')
end
end
return table.concat(sections,"\n\n")
end
return p
aritr0ja30wwig23ga3gox9v1bzp2wt
507784
507783
2026-04-14T06:35:38Z
Redmin
6857
507784
Scribunto
text/plain
local p = {}
local i18n = require('মডিউল:আভিধানিক উপাত্ত/i18n')
local references = require('মডিউল:উইকিউপাত্ত তথ্যসূত্র বিন্যাসকরণ').format
local getArgs = require('Module:Arguments').getArgs
local formatter_urls = require('মডিউল:আভিধানিক উপাত্ত/urls').formatter_urls
local wb = mw.wikibase
local ustring = mw.ustring
local html = mw.html
local mw_lang = mw.language
local entity_cache = {}
local reference_cache = {}
local forms
local lang_code
-- Counts whitespace-separated words in `text` (Unicode-aware via ustring).
-- The original parameter was named `string`, shadowing the standard library
-- table; renamed locally without affecting callers.
local function countWords(text)
	local n = 0
	for _ in ustring.gmatch(text, "%S+") do
		n = n + 1
	end
	return n
end
-- Normalizes an (Arabic-script) lemma for comparison with page titles:
-- strips the Arabic diacritics block and the superscript alef, and unifies
-- the hamza/madda alef variants to the plain alef.
local function normalizeLemmas(text)
	local ugsub = mw.ustring.gsub
	text = ugsub(text, "[ً-ٟ]", "")  -- Arabic diacritics
	text = ugsub(text, "ٰ", "")       -- superscript alef
	text = ugsub(text, "أ", "ا")      -- alef with hamza above
	text = ugsub(text, "إ", "ا")      -- alef with hamza below
	text = ugsub(text, "آ", "ا")      -- alef with madda
	return text
end
-- Recursively serializes a Lua value to a readable string (adapted from
-- https://stackoverflow.com/a/6081639); used to build stable hashes of
-- fake reference tables. `name`, when given, is emitted as "name = ".
-- Note: pairs() order makes table output order unspecified.
local function serializeTable(val, name, skipnewlines, depth)
	skipnewlines = skipnewlines or false
	depth = depth or 0
	local nl = skipnewlines and "" or "\n"
	local indent = string.rep(" ", depth)
	local out = indent
	if name then
		out = out .. name .. " = "
	end
	local kind = type(val)
	if kind == "table" then
		out = out .. "{" .. nl
		for k, v in pairs(val) do
			out = out .. serializeTable(v, k, skipnewlines, depth + 1) .. "," .. nl
		end
		out = out .. indent .. "}"
	elseif kind == "number" then
		out = out .. tostring(val)
	elseif kind == "string" then
		out = out .. string.format("%q", val)
	elseif kind == "boolean" then
		out = out .. tostring(val)
	else
		out = out .. "\"[inserializeable datatype:" .. kind .. "]\""
	end
	return out
end
-- Use this to safely expand templates when you are not sure that they exist.
-- Returns the expansion, or nil when the template is missing (detected via
-- the "does not exist" marker in the output, since expandTemplate() doesn't
-- seem to throw errors that pcall can catch) or when expansion raised an
-- error after all.
-- Fixes: the pcall `success` flag was ignored, so a real error whose message
-- did not contain "does not exist" was returned as if it were wikitext, and
-- result:find() could itself crash on a non-string error value.
local function safeExpand(frame, title, args)
	local success, result = pcall(function()
		return frame:expandTemplate{ title = title, args = args }
	end)
	if not success then
		-- Treat any raised error like a missing template instead of
		-- leaking the error message into the page.
		return nil
	end
	if type(result) == 'string' and result:find('does not exist') then
		return nil
	end
	return result
end
-- Formats a single Wikibase reference and memoizes it by `id` (its hash).
-- Returns a two-element list {out_id, text}:
--   * first call for a hash: out_id is nil, so the caller should emit a
--     full <ref name=hash>text</ref>;
--   * later calls: out_id echoes the hash, signalling the caller to emit a
--     reusing <ref name=hash/> instead.
local function getReference( id, reference )
local out_id = nil
local url_value
if reference_cache[id] == nil then
local ref_text = references(reference, wb, mw_lang, i18n['content_lang_code'], i18n['wikipedia'])
if reference.snaks ~= nil then
-- Special case: when "stated in" (P248) is the Quran, reword the
-- Bangla "section named" phrase to "verse no." in the citation text.
if reference.snaks['P248'] ~= nil then
for _, snak in pairs(reference.snaks['P248']) do
if snak.datavalue and snak.datavalue.value.id == 'Q428' then -- the Quran
ref_text = ustring.gsub(ref_text, 'নামক অনুচ্ছেদ', 'নং আয়াত')
break
end
end
end
-- Append a plain external link when a "reference URL" (P854) snak exists.
if reference.snaks['P854'] ~= nil then
local snak = reference.snaks['P854'][1]
if snak.datavalue then
url_value = snak.datavalue.value
end
end
end
if url_value ~= nil then
ref_text = ref_text .. ', [' .. url_value .. ' সংযোগ]'
end
reference_cache[id] = ref_text
else
out_id = id
end
return {out_id, reference_cache[id]}
end
-- Fetches a Wikibase entity by ID with memoization. Missing entities are
-- cached as `false` so repeated lookups of the same absent ID do not call
-- wb.getEntity again; callers still receive nil for them.
-- BUG FIX: the `~= false` check on return existed, but the false sentinel
-- was never written (a nil result left the cache slot nil), so every miss
-- was re-fetched on each call.
local function getEntity( id )
	if entity_cache[id] == nil then
		entity_cache[id] = wb.getEntity(id) or false
	end
	return entity_cache[id] ~= false and entity_cache[id] or nil
end
-- Derives a language code for the lexeme's language item, trying in order:
-- IETF language tag (P305), Wikimedia language code (P424), ISO 639-3
-- (P220). Returns nil when the language item is missing, cannot be loaded,
-- or carries none of those properties.
local function getLexemeLanguageCode(current_lexeme)
	local lang_item_id = current_lexeme:getLanguage()
	if lang_item_id == nil then
		return nil
	end
	local lang_entity = getEntity(lang_item_id)
	if lang_entity == nil then
		return nil
	end
	local code_properties = {'P305', 'P424', 'P220'}
	for _, property in ipairs(code_properties) do
		local best = lang_entity:getBestStatements(property)
		local first = best[1]
		if first ~= nil then
			return first.mainsnak.datavalue.value
		end
	end
	return nil
end
-- Return the first form of the (module-level) `forms` list whose grammatical
-- features are exactly { item_id }; nil when no such form exists.
local function formWithSingleGrammaticalFeature( item_id )
	for _, form in ipairs(forms) do
		local features = form:getGrammaticalFeatures()
		if #features == 1 and features[1] == item_id then
			return form
		end
	end
	return nil
end
-- Expands the local "wikipedia" template for the sitelink of the given item
-- on the configured wiki; returns '' when the item has no such sitelink.
local function getArticleLinkTemplate(frame, stmt_value)
	local sitelink = getEntity(stmt_value):getSitelink(i18n['wikipedia'])
	if sitelink == nil then
		return ''
	end
	return frame:expandTemplate{
		title = i18n['template_wikipedia'],
		args = { sitelink }
	}
end
-- Concatenate Wikipedia-link templates for every "item for this sense"
-- (P5137) and "predicate for this sense" (P9970) statement of a sense.
local function getArticleLinks (frame, sense )
    local parts = {}
    for _, property in ipairs({'P5137', 'P9970'}) do
        for _, stmt in pairs(sense:getAllStatements(property)) do
            parts[#parts + 1] = getArticleLinkTemplate(frame, stmt.mainsnak.datavalue.value.id)
        end
    end
    return table.concat(parts)
end
-- @TODO: Generalise
-- Expand `template` with the module-level lang_code followed by the
-- lemmata of every lexeme referenced by `property` on `object`.
-- Returns '' when lang_code is unset or no statements exist.
local function expandTemplateForProperty(frame, object, property, template)
    local lemma_list = {}
    local total = 0
    for _, stmt in pairs(object:getAllStatements(property)) do
        local lexeme_id = wb.lexeme.splitLexemeId(stmt.mainsnak.datavalue.value.id)
        total = total + 1
        lemma_list[total] = getEntity(lexeme_id):getLemma(lang_code)
    end
    if not lang_code or total == 0 then
        return ''
    end
    -- Template args: lang_code first, then the lemmata (nil lemmata are
    -- skipped by the #-append, matching the previous behaviour).
    local template_args = {lang_code}
    for i = 1, total do
        template_args[#template_args + 1] = lemma_list[i]
    end
    return frame:expandTemplate{
        title = template,
        args = template_args,
    }
end
-- Build "[url label]" external links for every claim on `entity` whose
-- property has a configured formatter URL (module-level formatter_urls).
-- See T418639.
local function getExternalLinks( entity )
    local links = {}
    if entity.claims == nil then
        return links
    end
    for property_id, statements in pairs(entity.claims) do
        local formatter_url = formatter_urls[property_id]
        if formatter_url ~= nil then
            -- Link text: the label of the property's source (P9073) when
            -- present, else the property's own label, else the raw id.
            local source_name
            local property_source = wb.getBestStatements(property_id, 'P9073')
            if next(property_source) ~= nil then
                local source_id = property_source[1].mainsnak.datavalue.value.id
                source_name = wb.getLabel(source_id) or source_id
            else
                source_name = wb.getLabel(property_id) or property_id
            end
            for _, stmt in ipairs(statements) do
                if stmt.mainsnak.datavalue then
                    local url = ustring.gsub(formatter_url, '$1', stmt.mainsnak.datavalue.value)
                    url = ustring.gsub(url, ' ', '+')
                    links[#links + 1] = '[' .. url .. ' ' .. source_name .. ']'
                end
            end
        end
    end
    return links
end
p.getExternalLinks = getExternalLinks -- exposed on the module table for debugging
-- Wrap a term ({text, language_code}) in a <span> carrying the language
-- code and the correct text direction for that language.
local function termSpan( term )
    local content, language = term[1], term[2]
    local span = html.create( 'span' )
        :attr( 'lang', language )
        :attr( 'dir', mw_lang.new( language ):getDir() )
        :wikitext( content )
    return tostring( span )
end
-- Like termSpan, but the text is emitted as a wikilink ([[text]]) inside
-- the language/direction-tagged <span>.
local function termLink( term )
    local content, language = term[1], term[2]
    local span = html.create( 'span' )
        :attr( 'lang', language )
        :attr( 'dir', mw_lang.new( language ):getDir() )
        :wikitext( '[[' .. content .. ']]' )
    return tostring( span )
end
-- Join all lemma representations of a lexeme with '/' as plain
-- language-tagged spans.
local function getLemmata( current_lexeme )
    local rendered = {}
    for _, representation in pairs(current_lexeme:getLemmas()) do
        rendered[#rendered + 1] = termSpan(representation)
    end
    return table.concat(rendered, '/')
end
-- Join all lemma representations of a lexeme with '/' as wikilinked,
-- language-tagged spans.
local function getLinkedLemmata( current_lexeme )
    local rendered = {}
    for _, representation in pairs(current_lexeme:getLemmas()) do
        rendered[#rendered + 1] = termLink(representation)
    end
    return table.concat(rendered, '/')
end
-- Render the usage examples (P5831) that belong to `sense_id`, both from
-- statements on the lexeme (qualified with P6072 = this sense) and from
-- statements stored on the sense entity itself. The occurrence of the
-- demonstrated form inside the example is bolded when it can be found.
-- Returns a two-element table: { rendered <dl> wikitext, references_seen }.
-- NOTE(review): example_text, example_lang, example_form, example_str and
-- new_example_text are assigned without `local` and leak as module
-- globals — presumably unintended; confirm nothing else reads them.
local function getExamples( current_lexeme, sense_id, references_seen )
    local examples = html.create('dl')
    -- Pass 1: examples recorded on the lexeme, filtered to this sense.
    for i, stmt in pairs(current_lexeme:getAllStatements('P5831')) do -- usage example
        if stmt.qualifiers ~= nil and stmt.qualifiers['P6072'] ~= nil and stmt.qualifiers['P6072'][1].datavalue.value.id == sense_id then -- subject sense of the lexeme
            -- ' / ' inside the stored example text marks a line break.
            example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ','<br/>')
            example_lang = stmt.mainsnak.datavalue.value.language
            -- Candidate form strings to bold inside the example.
            local example_form_strs = {}
            if stmt.qualifiers['P1810'] ~= nil then -- subject named as
                table.insert(example_form_strs, stmt.qualifiers['P1810'][1].datavalue.value)
            elseif stmt.qualifiers['P5830'] ~= nil then
                example_form = getEntity(stmt.qualifiers['P5830'][1].datavalue.value.id) -- demonstrates form
                for i, rep in pairs(example_form:getRepresentations()) do
                    table.insert(example_form_strs, rep[1])
                end
            end
            example_str = nil
            -- Use the first candidate that actually occurs in the text.
            for i, example_form_str in pairs(example_form_strs) do
                new_example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
                if new_example_text ~= example_text then
                    example_str = termSpan({new_example_text, example_lang})
                    break
                end
                new_example_text = example_text
            end
            if example_str == nil then
                example_str = termSpan({example_text, example_lang})
            end
            local reference_text = ''
            if stmt.references ~= nil then
                for j, reference in pairs(stmt.references) do
                    table.insert(references_seen, reference.hash)
                    local got_reference = getReference(reference.hash, reference)
                    reference_text = reference_text .. '\n\n' .. got_reference[2]
                end
            end
            if example_str ~= nil then
                examples:tag('dd'):wikitext("''" .. example_str .. "''")
                if reference_text ~= '' then
                    examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
                end
            end
        end
    end
    -- Pass 2: examples stored directly on the sense entity.
    for i, stmt in pairs(wb.getAllStatements(sense_id, 'P5831')) do -- usage example
        example_text = ustring.gsub(stmt.mainsnak.datavalue.value.text, ' / ','<br/>')
        example_lang = stmt.mainsnak.datavalue.value.language
        -- NOTE(review): this pass assumes qualifiers and P5830 are always
        -- present; a sense-level example without them would raise an error.
        example_form = getEntity(stmt.qualifiers['P5830'][1].datavalue.value.id) -- demonstrates form
        local example_form_str = nil
        if stmt.qualifiers['P1810'] ~= nil then -- subject named as
            example_form_str = stmt.qualifiers['P1810'][1].datavalue.value
        end
        if example_form_str == nil then
            example_form_str = example_form:getRepresentation(i18n['content_lang_code'])
        end
        if example_form_str == nil then
            example_form_str = example_form:getRepresentations()[1][1]
        end
        example_text = ustring.gsub(example_text, example_form_str, "'''" .. example_form_str .. "'''")
        example_str = termSpan({example_text, example_lang})
        local reference_text = ''
        if stmt.references ~= nil then
            for j, reference in pairs(stmt.references) do
                table.insert(references_seen, reference.hash)
                local got_reference = getReference(reference.hash, reference)
                reference_text = reference_text .. '\n\n' .. got_reference[2]
            end
        end
        if example_str ~= nil then
            examples:tag('dd'):wikitext("''" .. example_str .. "''")
            if reference_text ~= '' then
                examples:done():tag('dd'):css('text-indent', '2em'):wikitext(reference_text)
            end
        end
    end
    return { tostring(examples) , references_seen }
end
-- Find the first character of `title` lying in the inclusive code-point
-- range [start_point, end_point]; forwards ustring.find's results
-- (position or nil).
local function checkTitleCodePointInRange(title, start_point, end_point)
    local range_pattern = '[' .. ustring.char(start_point) .. '-' .. ustring.char(end_point) .. ']'
    return ustring.find( title, range_pattern )
end
-- Refine a language item id for categorisation when one Wikidata item
-- covers several scripts. The page title's Unicode block decides which
-- script-specific suffix to append; otherwise lang_id is returned as-is.
local function getLanguageForCategories( lang_id, current_page_title )
    local function title_in(first, last)
        return checkTitleCodePointInRange(current_page_title, first, last) ~= nil
    end
    if lang_id == 'Q11051' then -- Hindi/Urdu
        if title_in(0x0600, 0x06ff) then -- Arabic block: Urdu
            return 'Q11051ur'
        elseif title_in(0x0900, 0x097f) then -- Devanagari block: Hindi
            return 'Q11051hi'
        end
    elseif lang_id == 'Q58635' then -- Punjabi
        if title_in(0x0600, 0x06ff) then -- Shahmukhi
            return 'Q58635pnb'
        elseif title_in(0x0a00, 0x0a7f) then -- Gurmukhi
            return 'Q58635pa'
        end
    elseif lang_id == 'Q56356571' then -- New Persian
        if title_in(0x0600, 0x06ff) then -- Persian (Iran/Afghanistan)
            return 'Q56356571fa'
        elseif title_in(0x0400, 0x04ff) then -- Cyrillic block: Tajik
            return 'Q56356571tg'
        end
    end
    return lang_id
end
-- Return the main value of the first statement for `property` on
-- `object`, or nil when the object carries no such statement.
local function getOneStringForProperty(object, property)
    local statements = object:getAllStatements(property)
    if #statements == 0 then
        return nil
    end
    return statements[1].mainsnak.datavalue.value
end
-- Render a translations section from the P5972 ("translation") statements
-- of the given senses; nil when there is nothing to show.
-- TODO: woefully incomplete until T185313 and T199887 are resolved
local function getTranslations(frame, senses)
    if #senses == 0 then
        return nil
    end
    local translation_set = {}
    -- BUG FIX: `gloss` was assigned without `local` and leaked as a module
    -- global. The original behaviour is kept (the gloss of the last sense
    -- that has translations feeds the trans-top header), but the variable
    -- is now contained to this function.
    local gloss
    for _, sense in pairs(senses) do
        for _, stmt in pairs(sense:getAllStatements('P5972')) do
            local translation = stmt.mainsnak.datavalue.value.id
            local lexeme_id = wb.lexeme.splitLexemeId(translation)
            -- Label of the translated lexeme's language item.
            local language = wb.getLabel(getEntity(lexeme_id):getLanguage())
            gloss = sense:getGloss('bn')
            table.insert(translation_set, language .. ': ' .. getLinkedLemmata(getEntity(lexeme_id)) .. '<br/>')
        end
    end
    if #translation_set == 0 then
        return nil
    end
    local translations = frame:expandTemplate{title = i18n['template_trans-top'], args={gloss}}
    translations = '====' .. i18n['heading_translation'] .. '==== \n' .. translations
        .. table.concat(translation_set, '\n')
        .. frame:expandTemplate{title = i18n['template_trans-bottom']}
    return translations
end
-- Return the label of `cat_id` (the lexical-category item) followed by
-- the matching category link: the per-language mapping from
-- i18n.lang_categories when configured (with '_' as fallback key),
-- otherwise a maintenance category.
local function getCategory( lang_category, cat_id )
    local label = wb.getLabel( cat_id )
    local per_lang = i18n['lang_categories'][lang_category]
    local category_name
    if per_lang ~= nil then
        if per_lang[cat_id] ~= nil then
            category_name = per_lang[cat_id]
        else
            category_name = per_lang['_']
        end
    else
        category_name = i18n['maintenance_no_lang_category_found']
    end
    return label .. '[[Category:' .. category_name .. ']]'
end
-- Build a small pencil icon linking to the entity's page on Wikidata,
-- optionally with a ?uselang parameter and a #property fragment.
local createicon = function(langcode, entityID, propertyID)
    langcode = langcode or ""
    propertyID = propertyID or ""
    local parts = {
        " <span class='penicon autoconfirmed-show'>[[",
        -- " <span data-bridge-edit-flow='overwrite' class='penicon'>[[" -> enable Wikidata Bridge
        "File:OOjs UI icon edit-ltr-progressive.svg |frameless |text-top |10px |alt=",
        i18n['edit_wikidata'],
        "|link=https://www.wikidata.org/entity/",
        entityID,
    }
    if langcode ~= "" then
        parts[#parts + 1] = "?uselang=" .. langcode
    end
    if propertyID ~= "" then
        parts[#parts + 1] = "#" .. propertyID
    end
    parts[#parts + 1] = "|" .. i18n['edit_wikidata'] .. "]]</span>"
    return table.concat(parts)
end
-- Render the ordered list (<ol>) of meanings for the lexeme's senses.
-- Each sense gets: an anchor, italicised usage specifiers, the gloss
-- (wikilinked when a single word), synonym/antonym/hypernym templates,
-- quotations and sources as <ref>s, an optional image, external-id and
-- article links, and manually supplied per-sense notes from `args`.
-- Returns { <ol> html builder, updated references_seen }.
local function getMeanings( frame, args, current_lexeme, senses, references_seen, language_name)
    if #senses == 0 then
        -- No senses: edit pencil + "definition requested" notice + category.
        return {createicon(i18n['content_lang_code'], current_lexeme:getId()) .. "''" .. i18n['text_category_rfdef'] .. "''" .. '[[Category:' .. i18n['category_rfdef'] .. ']]', references_seen}
    end
    local meanings = html.create( 'ol' )
    for i, sense in pairs(senses) do
        local gloss_text_parts = {}
        -- Anchor so other entries can deep-link to this sense id.
        local main_gloss_text = frame:expandTemplate{
            title=i18n['template_anchor'],
            args={sense:getId()}
        }
        local specifiers = {}
        for k, property_id in ipairs({'P6084', 'P6191', 'P9488'}) do -- location of sense usage, language style, field of usage
            for i, stmt in pairs(sense:getAllStatements(property_id)) do
                local stmt_value = stmt.mainsnak.datavalue.value.id
                table.insert(specifiers, wb.getLabel(stmt_value, i18n['content_lang_code']))
            end
        end
        if #specifiers > 0 then
            main_gloss_text = main_gloss_text .. "(''" .. table.concat(specifiers, "'', ''") .. "'') "
        end
        local gloss = sense:getGloss( i18n['content_lang_code'] )
        if gloss ~= nil then
            if countWords(gloss) == 1 then
                -- One-word glosses become links to that word's own entry.
                main_gloss_text = main_gloss_text .. "[[" .. gloss .. "#" .. i18n['content_lang_name'] .. "|" .. gloss .. "]]"
            else
                main_gloss_text = main_gloss_text .. gloss
            end
            if gloss:match('^প্রদত্ত%s*(%S-)%s*নাম$') then -- given names
                main_gloss_text = main_gloss_text .. '[[' .. 'Category:' .. language_name .. ' ' .. i18n['category_given_names'] .. ']]'
            end
        else
            -- No gloss in the content language: fall back to "item for this
            -- sense" labels, then glosses in fallback languages, then any
            -- gloss at all (tagged with the gloss language's name).
            local other_gloss_text = nil
            local other_gloss_lang = nil
            local item_label_gloss_parts = {}
            for k, stmt in pairs(sense:getAllStatements('P5137')) do -- if the 'item for this sense' value has a label
                local stmt_value = stmt.mainsnak.datavalue.value.id
                local stmt_label = wb.getLabel(stmt_value)
                if stmt_label ~= nil then
                    table.insert(item_label_gloss_parts, '[[:d:' .. stmt_value .. '|' .. stmt_label .. ']]')
                end
            end
            if #item_label_gloss_parts > 0 then
                other_gloss_text = table.concat(item_label_gloss_parts, '; ')
            end
            if other_gloss_text == nil then
                for i, fallback_lang in ipairs(mw_lang.getFallbacksFor( i18n['content_lang_code'] )) do
                    if sense:getGloss( fallback_lang ) ~= nil then
                        -- NOTE(review): other_gloss_lang is only set if
                        -- getGloss returns two values — confirm against the
                        -- Wikibase client API.
                        other_gloss_text, other_gloss_lang = sense:getGloss( fallback_lang )
                    end
                end
                if other_gloss_lang == nil then
                    local glosses = sense:getGlosses()
                    for j, gloss in pairs(glosses) do
                        other_gloss_text = gloss[1]
                        other_gloss_lang = gloss[2]
                        break
                    end
                end
                main_gloss_text = main_gloss_text .. other_gloss_text .. "<sup><em>" .. mw_lang.fetchLanguageName(other_gloss_lang, i18n['content_lang_code']) .. "</em></sup>"
            else
                main_gloss_text = main_gloss_text .. "''" .. other_gloss_text .. "''"
            end
            main_gloss_text = main_gloss_text .. '[[Category:' .. i18n['category_rfdef_equivalent'] .. ']]'
        end
        local synonym = expandTemplateForProperty(frame, sense, 'P5973', i18n['template_synonym'])
        if synonym ~= '' then
            main_gloss_text = main_gloss_text .. ' <br/> ' .. synonym
        end
        local antonym = expandTemplateForProperty(frame, sense, 'P5974', i18n['template_antonym'])
        if antonym ~= '' then
            main_gloss_text = main_gloss_text .. ' <br/> ' .. antonym
        end
        local hypernym = expandTemplateForProperty(frame, sense, 'P6593', i18n['template_hypernym'])
        if hypernym ~= '' then
            main_gloss_text = main_gloss_text .. ' <br/> ' .. hypernym
        end
        -- NOTE(review): lex_cat here reads a global; p.all keeps its own
        -- lex_cat as a local, so these branches look unreachable — confirm.
        if lex_cat == 'Q1084' or lex_cat == 'Q147276' then -- noun or proper noun
            local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-noun'])
            main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
        elseif lex_cat == 'Q34698' then
            local demonym = expandTemplateForProperty(frame, sense, 'P6271', i18n['template_demonym-adj'])
            main_gloss_text = main_gloss_text .. ' <br/> ' .. demonym
        end
        table.insert(gloss_text_parts, main_gloss_text .. createicon(i18n['content_lang_code'], sense:getId()))
        for i, stmt in pairs(sense:getAllStatements('P8394')) do -- quotation of the gloss
            -- NOTE(review): gloss_quote leaks as a global, and the insert
            -- below runs outside the nil check — it errors when
            -- stmt.references is nil/empty; confirm intended.
            gloss_quote = termSpan({stmt.mainsnak.datavalue.value.text, stmt.mainsnak.datavalue.value.language})
            if stmt.references[1] ~= nil then
                local got_reference = getReference ( stmt.references[1].hash, stmt.references[1] )
                gloss_quote = '"' .. gloss_quote .. '" ' .. got_reference[2]
            end
            table.insert(references_seen, stmt.references[1].hash)
            table.insert(gloss_text_parts, frame:extensionTag('ref', gloss_quote))
        end
        for i, stmt in pairs(sense:getAllStatements('P1343')) do -- described by source
            -- TODO: do away with making fake reference objects
            local fake_reference = { ['snaks'] = {} }
            fake_reference.snaks['P248'] = { [1] = stmt.mainsnak }
            qualifiers_order = stmt['qualifiers-order']
            if qualifiers_order ~= nil then
                for i, k in ipairs(qualifiers_order) do fake_reference.snaks[k] = stmt.qualifiers[k] end
            end
            -- Hash the fake reference so it deduplicates like real ones.
            fake_reference.hash = mw.hash.hashValue('sha3-512', serializeTable(fake_reference))
            table.insert(references_seen, fake_reference.hash)
            local got_reference = getReference(fake_reference.hash, fake_reference)
            if got_reference[1] == nil then
                table.insert(gloss_text_parts, frame:extensionTag('ref', got_reference[2], {name = fake_reference.hash}))
            else
                -- Already rendered once: emit a named, empty <ref> reuse.
                table.insert(gloss_text_parts, frame:extensionTag{name = 'ref', content='', args = {name = got_reference[1]}})
            end
        end
        -- Show the sense's first image (P18) as a thumbnail with caption.
        local first_sense_image = ''
        local sense_images = sense:getAllStatements('P18')
        if next(sense_images) ~= nil then
            first_sense_image = sense_images[1].mainsnak.datavalue.value
        end
        if first_sense_image ~= '' then
            table.insert(gloss_text_parts, '[[চিত্র:' .. first_sense_image .. "|thumb|'''" .. getLemmata(current_lexeme) .. "'''—" .. main_gloss_text .. ']]')
        end
        local idlinks = getExternalLinks(sense)
        if #idlinks > 0 then
            local idlinktext = '<small>('
            for i, idlink in pairs(idlinks) do
                idlinktext = idlinktext .. idlink .. '\n'
            end
            idlinktext = idlinktext .. ')</small>'
            table.insert(gloss_text_parts, idlinktext)
        end
        local externallinks = getArticleLinks(frame, sense)
        if externallinks ~= '' then
            table.insert(gloss_text_parts, externallinks)
        end
        -- Manual notes keyed by the full sense id or its local part (text
        -- after the '-' in e.g. 'L123-S1').
        local new_notes = {}
        local sense_keys = { sense:getId(), string.sub(sense:getId(), string.find(sense:getId(), '-')+1) }
        for i, v in ipairs(sense_keys) do
            if args[v] ~= nil then
                table.insert(new_notes, args[v])
            end
        end
        if #new_notes > 0 then
            for i, v in ipairs(new_notes) do
                if i == 1 then
                    table.insert(gloss_text_parts, '<br/>' .. v)
                else
                    table.insert(gloss_text_parts, v)
                end
            end
        end
        -- The local shadows the references_seen parameter; getExamples
        -- mutates the same table in place, so updates are not lost.
        local examples, references_seen = unpack(getExamples( current_lexeme, sense:getId(), references_seen ))
        local gloss_text = table.concat(gloss_text_parts, '\n')
        meanings:tag('li'):wikitext(gloss_text):wikitext(examples)
    end
    return {meanings, references_seen}
end
-- Choose the form whose statements represent the lexeme's pronunciation.
-- Bengali nouns prefer the nominative-only form (Q131105) and Bengali
-- verbs the verbal-noun form (Q1350145); otherwise the first form wins.
-- (!) Other languages with a different base form can be added here with
-- a new if statement.
local function getPronunciationBaseForm( lang_name, lex_cat)
    if lang_name == 'বাংলা' then
        if lex_cat == 'Q1084' then -- noun
            local nominative = formWithSingleGrammaticalFeature( 'Q131105' ) -- nominative case
            if nominative ~= nil then
                return nominative
            end
        elseif lex_cat == 'Q24905' then -- verb
            local verbal_noun = formWithSingleGrammaticalFeature( 'Q1350145' ) -- verbal noun
            if verbal_noun ~= nil then
                return verbal_noun
            end
        end
    end
    -- Fall back to the first form found.
    for _, form in pairs(forms) do
        return form
    end
    return nil
end
-- Render the "combines lexemes" (P5238) statements of a lexeme as
-- 'part1 + part2 + ...', each part wikilinked and followed by its own
-- etymology in '(← ...)' parentheses when available. Returns '' when no
-- ordered parts exist.
local function getCombines( current_lexeme )
    local combines = ''
    local index_mappings = {}
    -- Sort the parts by their 'series ordinal' (P1545) qualifier.
    for i, stmt in pairs(current_lexeme:getAllStatements('P5238')) do
        if stmt.qualifiers ~= nil and stmt.qualifiers['P1545'] ~= nil then -- series ordinal
            local current_index = tonumber(stmt.qualifiers['P1545'][1].datavalue.value)
            index_mappings[current_index] = stmt
        end
    end
    -- NOTE(review): #index_mappings/ipairs only cover a gapless 1..n run of
    -- ordinals; parts whose ordinals leave a gap are silently dropped.
    if #index_mappings ~= 0 then
        for i, stmt in ipairs(index_mappings) do
            local part_lexeme_id = stmt.mainsnak.datavalue.value.id
            local part_lexeme = getEntity(part_lexeme_id)
            local current_substring = getLinkedLemmata(part_lexeme)
            -- Mutually recursive with the global getEtymology defined below.
            local part_etymology = getEtymology(part_lexeme)
            if part_etymology ~= '' and part_etymology ~= nil then
                current_substring = current_substring .. ' (← ' .. part_etymology .. ')'
            end
            if combines == '' then
                combines = current_substring
            else -- @TODO: This should use the 'affix' template instead.
                combines = combines .. ' + ' .. current_substring
            end
        end
    end
    return combines
end
-- Render the lexeme's root (P5920), if any, as '√' followed by the
-- linked lemmata of the first root lexeme; '' when no root is recorded.
function getRoots ( current_lexeme )
    local root_statements = current_lexeme:getAllStatements('P5920')
    if #root_statements == 0 then
        return ''
    end
    local root = getEntity(root_statements[1].mainsnak.datavalue.value.id)
    return '√' .. getLinkedLemmata(root)
end
-- Render the etymology of a lexeme: the derivation chain (P5191, followed
-- recursively through origin lexemes), the root (P5920) and the
-- morphological combination (P5238). Returns '' when nothing is known.
-- TODO: see if any etymology chains are not possible to render
function getEtymology ( current_lexeme )
    local etymology = ''
    local current_combines = getCombines(current_lexeme)
    local current_roots = getRoots(current_lexeme)
    local stmts = current_lexeme:getAllStatements('P5191') -- derived from lexeme
    if #stmts == 0 then
        if current_roots ~= '' and current_combines ~= '' then
            return current_roots .. '<br/>(' .. current_combines .. ')'
        elseif current_roots ~= '' then
            return current_roots
        else
            return current_combines
        end
    end
    for i, stmt in pairs(stmts) do
        local origin_lexeme_dv = stmt.mainsnak.datavalue -- If this is nil, the origin lexeme is not known.
        if origin_lexeme_dv ~= nil then
            local origin_lexeme = getEntity(origin_lexeme_dv.value.id)
            local origin_lexeme_lang = origin_lexeme:getLanguage()
            local origin_lexeme_string = getLinkedLemmata(origin_lexeme) .. ' (' .. wb.getLabel(origin_lexeme_lang) .. ')'
            -- Reword according to the mode of derivation (P5886) qualifier.
            if stmt.qualifiers ~= nil and stmt.qualifiers['P5886'] ~= nil then
                local mode_of_derivation = stmt.qualifiers['P5886'][1].datavalue.value.id
                if mode_of_derivation == 'Q1345001' then -- borrowing
                    origin_lexeme_string = ustring.gsub(i18n['etymology_borrowing'], '$1', origin_lexeme_string)
                elseif mode_of_derivation == 'Q845079' then -- learned borrowing
                    origin_lexeme_string = ustring.gsub(i18n['etymology_learned_borrowing'], '$1', origin_lexeme_string)
                elseif mode_of_derivation == 'Q56611986' then -- inheritance
                    origin_lexeme_string = ustring.gsub(i18n['etymology_inheritance'], '$1', origin_lexeme_string)
                end
            end
            -- Recurse into the origin lexeme's own etymology to form a chain.
            local origin_origin = getEtymology(origin_lexeme)
            local new_etymology_string
            if origin_origin ~= '' and origin_origin ~= nil then
                new_etymology_string = origin_lexeme_string .. ' ← ' .. origin_origin
            else
                new_etymology_string = origin_lexeme_string
            end
            -- BUG FIX: new_etymology_string/origin_lexeme_string were
            -- previously read after their scope ended, so `etymology` was
            -- assigned nil globals (and could error on concatenation).
            -- Accumulate while the locals are still in scope instead.
            if etymology == '' then
                etymology = new_etymology_string
            else
                etymology = etymology .. ' ' .. origin_lexeme_string
            end
        end
    end
    if current_roots ~= '' then
        etymology = etymology .. ' ' .. current_roots
    end
    if current_combines ~= '' then
        etymology = etymology .. '<br/>(' .. current_combines .. ')'
    end
    return etymology
end
-- Render the pronunciation list for a lexeme: audio files (P443), IPA
-- transcriptions (P898) — or a language-specific IPA template as a
-- fallback — plus ISO 15919, ITRANS and IAST transliterations.
local function getPronunciation ( frame, current_lexeme, lang_name, lex_cat )
    local pronunciations = {}
    local base_form = getPronunciationBaseForm(lang_name, lex_cat )
    if base_form ~= nil then
        for i, stmt in pairs(base_form:getAllStatements('P443')) do -- pronunciation audio
            local pronunciation_file = stmt.mainsnak.datavalue.value
            local specifier_text = ''
            local specifiers = {}
            if stmt.qualifiers ~= nil then
                for k, property_id in ipairs({'P5237'}) do -- pronunciation variety
                    -- BUG FIX: guard against statements that carry qualifiers
                    -- but none for this property (pairs(nil) would error).
                    local quals = stmt.qualifiers[property_id]
                    if quals ~= nil then
                        for l, qual in pairs(quals) do
                            local stmt_value = qual.datavalue.value.id
                            table.insert(specifiers, wb.getLabel(stmt_value))
                        end
                    end
                end
            end
            if #specifiers > 0 then
                specifier_text = table.concat(specifiers, "'', ''")
            end
            local audio_text
            if specifier_text ~= '' then
                audio_text = i18n['text_audio'] .. ' (' .. specifier_text .. ')'
            else
                audio_text = i18n['text_audio']
            end
            table.insert(pronunciations, '* ' .. frame:expandTemplate{
                title= i18n['template_audio'],
                args = {lang_name, pronunciation_file, audio_text}
            })
        end
        local ipa_transcription = base_form:getAllStatements('P898') -- IPA transcription
        local iso15919_transcription = getOneStringForProperty(base_form, 'P5825') -- ISO 15919 transliteration
        local itrans = getOneStringForProperty(base_form, 'P8881') -- ITRANS
        local iast = getOneStringForProperty(base_form, 'P7581') -- IAST transliteration
        -- @TODO: when both audio and a transcription exist, they should be
        -- shown right next to each other
        if #ipa_transcription ~= 0 then
            for i, stmt in pairs(ipa_transcription) do
                local ipa_text = stmt.mainsnak.datavalue.value
                local specifier_text = ''
                local specifiers = {}
                if stmt.qualifiers ~= nil then
                    for k, property_id in ipairs({'P5237'}) do -- pronunciation variety
                        -- BUG FIX: same missing-qualifier guard as above.
                        local quals = stmt.qualifiers[property_id]
                        if quals ~= nil then
                            for l, qual in ipairs(quals) do
                                table.insert(specifiers, wb.getLabel( qual.datavalue.value.id ))
                            end
                        end
                    end
                end
                if #specifiers > 0 then
                    specifier_text = "(''" .. table.concat(specifiers, "'', ''") .. "'') "
                end
                table.insert(pronunciations, '* ' .. specifier_text .. frame:expandTemplate{
                    title= i18n['template_ipa'],
                    args = {lang_name, ipa_text}
                })
            end
        -- The following checks are ordered based on which one is expected to be true in a higher number of cases.
        elseif lang_name == 'বাংলা' then -- Bengali
            table.insert(pronunciations, '* ' .. frame:expandTemplate{
                title='bn-IPA',
            })
        elseif lang_name == 'আরবি' then -- Arabic
            local lemma = current_lexeme:getLemma('ar')
            table.insert(pronunciations, '* ' .. frame:expandTemplate{
                title='ar-IPA',
                args={lemma}
            })
        elseif lang_name == 'ফালা' then -- Fala
            table.insert(pronunciations, '* ' .. frame:expandTemplate{
                title='fax-pron',
            })
        elseif lang_name == 'ফিনীয়' then -- Finnish
            table.insert(pronunciations, '* ' .. frame:expandTemplate{
                title='fi-IPA',
            })
        end
        if iso15919_transcription ~= nil then
            table.insert(pronunciations, '* ' .. i18n['text_iso15919'] .. ': ' .. iso15919_transcription)
        end
        if itrans ~= nil then
            table.insert(pronunciations, '* ' .. i18n['text_itrans'] .. ': ' .. itrans)
        end
        if iast ~= nil then
            table.insert(pronunciations, '* ' .. i18n['text_iast'] .. ': ' .. iast)
        end
    end -- e.g. {{আধ্বব|en|/ˈɪntəvjuː/}}
    return table.concat(pronunciations, '\n')
end
-- List the alternative spellings (P11577) of the lexeme as '* ' bullets
-- of linked lemmata, joined with newlines; '' when there are none.
function getAlternativeSpellings( current_lexeme )
    local bullets = {}
    for _, stmt in pairs(current_lexeme:getAllStatements('P11577')) do -- alternative spelling
        local dv = stmt.mainsnak.datavalue
        if dv ~= nil then
            bullets[#bullets + 1] = '* ' .. getLinkedLemmata(getEntity(dv.value.id))
        end
    end
    return table.concat(bullets, '\n')
end
-- Wrap `text` in a MediaWiki heading of the given level,
-- e.g. level 4 yields '==== text ===='.
local function heading_level(text, level)
    local marker = string.rep('=', level)
    return ('%s %s %s'):format(marker, text, marker)
end
-- Collect args[k] for every key k in `keys` that is set, in key order.
-- (`sections` is accepted for signature compatibility but not used.)
function get_any_notes(sections, args, keys)
    local notes = {}
    for _, key in ipairs(keys) do
        local note = args[key]
        if note ~= nil then
            notes[#notes + 1] = note
        end
    end
    return notes
end
-- Append every entry of `notes` to `sections`, preserving order.
function add_specific_notes(sections, notes)
    for _, note in ipairs(notes) do
        sections[#sections + 1] = note
    end
end
-- Append args[k] to `sections` for every key k in `keys` that is set.
local function add_any_notes(sections, args, keys)
    for _, key in ipairs(keys) do
        local note = args[key]
        if note ~= nil then
            sections[#sections + 1] = note
        end
    end
end
-- This calls frame:preprocess() instead of :callParserFunction() because the latter does not work for Wikifunctions function calls yet (see https://www.wikifunctions.org/wiki/Wikifunctions:Embedded_function_calls).
local function callWikifunctionsFunction(args, frame)
    local wikitext = '{{#function:' .. args .. '}}'
    return frame:preprocess(wikitext)
end
-- Return the lexeme lemma whose normalized form (via normalizeLemmas)
-- equals the current page title, or nil when no lemma matches.
local function getMatchingLemmaForPageTitle(lexeme, title)
    local matched_lemma
    for _, lemma_entry in ipairs(lexeme:getLemmas()) do
        local lemma = lemma_entry[1]
        if normalizeLemmas(lemma) == title then
            matched_lemma = lemma
            break
        end
    end
    -- BUG FIX: the result was computed but never returned, so every caller
    -- (e.g. the ar-noun/la-noun headword branches) always received nil.
    return matched_lemma
end
-- Build a sortable, collapsible wikitable listing every form (module
-- global `forms`) with its grammatical-feature labels and, when at least
-- one form has an image (P7407), an image column. The table starts
-- collapsed unless images are present.
local function buildLanguageAgnosticInflectionTable()
    local has_image = false
    local form_images = {}
    for i, form in ipairs(forms) do
        local form_image = form:getAllStatements('P7407')
        if next(form_image) ~= nil then
            form_images[i] = form_image[1].mainsnak.datavalue.value
            has_image = true
        end
    end
    local table_class = "wikitable mw-collapsible sortable"
    if not has_image then
        table_class = table_class .. " mw-collapsed"
    end
    local text = "{| class='" .. table_class .. "' style='border:solid 1px rgb(80%,80%,100%); text-align:center;'\n"
    text = text .. "|+ " .. i18n['heading_inflection_table'] .. "\n"
    text = text .. "|- \n"
    text = text .. "! " .. i18n['heading_form'] .. " !! " .. i18n['heading_grammatical_features']
    if has_image then
        text = text .. " !! " .. (i18n['heading_image'])
    end
    text = text .. " \n"
    for i, form in ipairs(forms) do
        local rep = form:getRepresentations()
        local feat = form:getGrammaticalFeatures()
        -- Multiple representations share one cell, joined by ' / '.
        local rep_text = ""
        for j, r in pairs(rep) do
            if rep_text == "" then
                rep_text = r[1]
            else
                rep_text = rep_text .. " / " .. r[1]
            end
        end
        -- Grammatical features shown by their Wikidata labels, comma-joined.
        local feat_text = ""
        if feat ~= nil then
            for j, f in ipairs(feat) do
                local label = wb.getLabel(f)
                if feat_text == "" then
                    feat_text = label
                else
                    feat_text = feat_text .. ", " .. label
                end
            end
        end
        -- Em dash placeholder for empty cells.
        text = text .. "|-\n"
        text = text .. "| " .. (rep_text ~= "" and rep_text or "—")
        text = text .. " || " .. (feat_text ~= "" and feat_text or "—")
        if has_image then
            local image_cell = "—"
            if form_images[i] ~= nil then
                image_cell = "[[চিত্র:" .. form_images[i] .. "|50px]]"
            end
            text = text .. " || " .. image_cell
        end
        text = text .. "\n"
    end
    text = text .. "|}"
    return text
end
-- Entry point: render a complete dictionary entry for the lexeme id given
-- in args[1]. args[2] = "false"/"0"/"না" suppresses the language heading;
-- further named args inject manual notes per section (and per sense id,
-- see getMeanings). Returns the assembled wikitext.
function p.all( frame )
    local args = getArgs(frame)
    local lexeme_id = args[1]
    local current_lexeme = getEntity(lexeme_id)
    local current_language = current_lexeme:getLanguage()
    local senses = current_lexeme:getSenses()
    local add_heading = true
    -- Deliberately global: several helpers above read `forms` directly.
    forms = current_lexeme:getForms()
    if args[2] ~= nil then
        local val = mw.text.trim(tostring(args[2]))
        if val == "false" or val == "0" or val == "না" then
            add_heading = false
        end
    end
    local references_seen = {}
    local sections = {}
    local lang_name = wb.getLabel(current_language)
    if add_heading == true then
        local lang_heading = "== " .. lang_name .. " =="
        table.insert(sections, lang_heading)
    end
    local lex_cat = current_lexeme:getLexicalCategory()
    lang_code = getLexemeLanguageCode(current_lexeme) -- This should remain available to all functions.
    local title = mw.title.getCurrentTitle().text
    local lang_category = getLanguageForCategories(current_language, title)
    local cat = getCategory( lang_category, lex_cat )
    local lex_cat_template
    if cat ~= nil then
        -- Part-of-speech heading with a lexeme-id anchor, followed by the
        -- standard lexeme infobox template.
        table.insert(sections, '===' .. cat .. frame:expandTemplate{
            title = i18n['template_anchor'],
            args = { lexeme_id }
        } .. '===')
        table.insert(sections, frame:expandTemplate{
            title= i18n['template_lexeme'],
            args = {lexeme_id}
        })
        add_any_notes(sections, args, i18n['manual_category'])
        local etymology = getEtymology ( current_lexeme )
        if etymology ~= '' and etymology ~= nil then
            table.insert(sections, heading_level(i18n['heading_etymology'], 4))
            table.insert(sections, tostring(etymology))
        end
        add_any_notes(sections, args, i18n['manual_etymology'])
        local pronunciation = getPronunciation( frame, current_lexeme, lang_name, lex_cat )
        if pronunciation ~= '' then
            table.insert(sections, heading_level(i18n['heading_pronunciation'], 4))
            table.insert(sections, tostring(pronunciation))
        end
        add_any_notes(sections, args, i18n['manual_pronunciation'])
        local alternative_spellings = getAlternativeSpellings( current_lexeme )
        if alternative_spellings ~= '' then
            table.insert(sections, heading_level(i18n['heading_alternative_spellings'], 4))
            table.insert(sections, alternative_spellings)
        end
        -- Headword-line template, chosen by lexical category and language.
        if lang_code ~= nil then
            if lex_cat == 'Q34698' then -- adjective
                lex_cat_template = safeExpand(frame, lang_code .. '-adj')
                if not lex_cat_template then
                    lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষণ')
                end
            elseif lex_cat == 'Q1084' then -- noun
                local gender
                local stmts = current_lexeme:getAllStatements('P5185') -- grammatical gender
                if #stmts ~= 0 then
                    local gender_qid = stmts[1].mainsnak.datavalue.value.id
                    if gender_qid == 'Q499327' then -- @TODO: Add checks for every possible circumstance
                        gender = 'm'
                    elseif gender_qid == 'Q1775415' then
                        gender = 'f'
                    elseif gender_qid == 'Q1775461' then
                        gender = 'n'
                    end
                end
                -- The following checks are ordered based on which one is expected to be true in a higher number of cases.
                if current_language == 'Q13955' then -- Arabic
                    local matched_lemma = getMatchingLemmaForPageTitle(current_lexeme, title)
                    if matched_lemma ~= nil then
                        lex_cat_template = frame:expandTemplate{title='ar-noun', args={matched_lemma,gender}}
                    else
                        lex_cat_template = frame:expandTemplate{title='ar-noun', args={nil,gender}}
                    end
                elseif current_language == 'Q29919' then -- Egyptian Arabic
                    lex_cat_template = frame:expandTemplate{title='arz-noun', args={g=gender}}
                elseif current_language == 'Q397' then -- Latin
                    local matched_lemma = getMatchingLemmaForPageTitle(current_lexeme, title)
                    if matched_lemma ~= nil then
                        lex_cat_template = frame:expandTemplate{title='la-noun', args={matched_lemma,g=gender}}
                    end
                elseif current_language == 'Q11059' then -- Sanskrit
                    lex_cat_template = frame:expandTemplate{title='sa-noun', args={g=gender}}
                elseif current_language ~= 'Q1860' then -- These templates require the gender to be passed as the 1st argument.
                    lex_cat_template = safeExpand(frame, lang_code .. '-noun', {gender})
                    if not lex_cat_template then
                        lex_cat_template = safeExpand(frame, lang_code .. '-বিশেষ্য', {gender})
                    end
                end
            end
        end
        -- elseif lex_cat == 'Q147276' then
        -- lex_cat_template = safeExpand(frame, lang_code .. '-proper noun', {gender})
        -- if not lex_cat_template then
        -- lex_cat_template = safeExpand(frame, lang_code .. '-নামবাচক বিশেষ্য', {gender})
        -- end
    end
    if lex_cat_template ~= nil then
        table.insert(sections, lex_cat_template)
    end
    local meanings, references_seen = unpack(getMeanings( frame, args, current_lexeme, senses, references_seen, lang_name))
    if lex_cat_template == nil then
        -- No headword template: fall back to a heading showing the lemma
        -- that matches the page title, or a maintenance category when none
        -- does.
        local lemmas = current_lexeme:getLemmas()
        local matched_lemma = nil
        for _, lemma_entry in ipairs(lemmas) do
            local lemma = lemma_entry[1]
            local clean_lemma = normalizeLemmas(lemma)
            if clean_lemma == title then
                matched_lemma = lemma
                break
            end
        end
        if matched_lemma ~= nil then
            table.insert(sections, heading_level(matched_lemma, 4))
        else
            table.insert(sections, '[[Category:যেসব ভুক্তিতে লেমার হেডিং দেখানো অসম্ভব]]')
        end
    end
    table.insert(sections, tostring(meanings))
    add_any_notes(sections, args, i18n['manual_meaning'])
    local instance_of = current_lexeme:getBestStatements('P31') -- instance of
    if #instance_of ~= 0 then
        local instance_of_entity = instance_of[1].mainsnak.datavalue.value.id
        if instance_of_entity == 'Q40437546' or instance_of_entity == 'Q120831827' or instance_of_entity == 'Q120717979' or instance_of_entity == 'Q124476844' then -- @TODO: generalise this so all types of roots are shown
            local instance_of_value = i18n['text_instance_of'] .. ' ' .. wb.getLabel(instance_of_entity)
            table.insert(sections, instance_of_value)
        end
    end
    local translations = getTranslations(frame, senses)
    if translations ~= nil then
        table.insert(sections, translations)
    end
    -- (!) Inflection tables for specific languages can be added here with
    -- new if statements.
    if next(forms) ~= nil then
        if current_language == 'Q9610' then -- Bengali
            local conjTable = require('মডিউল:আভিধানিক উপাত্ত/Q9610').getInflectionTable(frame, current_lexeme)
            table.insert(sections, conjTable) -- Bengali adjectives do not need any table thanks to the bn-adj template.
        --elseif current_language == 'Q13955' then -- Arabic
        -- if lex_cat == 'Q1084' then
        -- table.insert(sections, frame:expandTemplate{title='ar-decl-noun', args={lemma}})
        -- end
        --elseif current_language == 'Q188' then -- German
        -- if lex_cat == 'Q1084' then
        -- table.insert(sections, callWikifunctionsFunction('Z28602|' .. lexeme_id .. '|', frame)) -- German noun declension table, enable once T422299 is resolved
        -- end
        else
            if current_language ~= 'Q1860' then -- English
                table.insert(sections, buildLanguageAgnosticInflectionTable())
            end
        end
    end
    local reference_notes = get_any_notes(sections, args, i18n['manual_reference'])
    if #references_seen > 0 or #reference_notes > 0 then
        table.insert(sections, heading_level(i18n['heading_references'], 4))
        table.insert(sections, frame:extensionTag('references'))
        add_specific_notes(sections, reference_notes)
    end
    local external_link_table = getExternalLinks ( current_lexeme )
    if #external_link_table > 0 then
        local external_links = '* ' .. table.concat(external_link_table, '\n* ')
        table.insert(sections, heading_level(i18n['heading_external_links'], 4))
        table.insert(sections, external_links)
    end
    add_any_notes(sections, args, i18n['manual_external_link'])
    -- Nothing sourced at all: add the "references requested" category.
    if #references_seen == 0 and #reference_notes == 0 and #external_link_table == 0 and #get_any_notes(sections, args, i18n['manual_external_link']) == 0 then
        if i18n['category_rfref'][lang_category] ~= nil then
            table.insert(sections, '[[Category:' .. i18n['category_rfref'][lang_category] .. ']]')
        else
            table.insert(sections, '[[Category:' .. i18n['category_rfref']['_'] .. ']]')
        end
    end
    return table.concat(sections,"\n\n")
end
return p
a1x9rsuvpokmhgmzveitislw3hnfycn
মডিউল:etymology
828
51758
507787
325668
2026-04-14T06:42:17Z
Redmin
6857
[[en:Module:etymology|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507787
Scribunto
text/plain
local export = {}
-- For testing
local force_cat = false
local debug_track_module = "Module:debug/track"
local languages_module = "Module:languages"
local links_module = "Module:links"
local pron_qualifier_module = "Module:pron qualifier"
local table_module = "Module:table"
local utilities_module = "Module:utilities"
local concat = table.concat
local insert = table.insert
local new_title = mw.title.new
-- Lazy loaders: each stub below replaces itself with the real function from
-- the target module on first call, so the required modules are only loaded
-- when (and if) actually needed. (`local function NAME` declares the local
-- before the body runs, so the assignment inside rebinds that same local.)
local function debug_track(...)
-- NOTE(review): assumes [[Module:debug/track]] returns the tracking function
-- itself (not a table of functions) — confirm.
debug_track = require(debug_track_module)
return debug_track(...)
end
local function format_categories(...)
format_categories = require(utilities_module).format_categories
return format_categories(...)
end
local function format_qualifiers(...)
format_qualifiers = require(pron_qualifier_module).format_qualifiers
return format_qualifiers(...)
end
local function full_link(...)
full_link = require(links_module).full_link
return full_link(...)
end
local function get_language_data_module_name(...)
get_language_data_module_name = require(languages_module).getDataModuleName
return get_language_data_module_name(...)
end
local function get_link_page(...)
get_link_page = require(links_module).get_link_page
return get_link_page(...)
end
local function language_link(...)
language_link = require(links_module).language_link
return language_link(...)
end
local function serial_comma_join(...)
serial_comma_join = require(table_module).serialCommaJoin
return serial_comma_join(...)
end
local function shallow_copy(...)
shallow_copy = require(table_module).shallowCopy
return shallow_copy(...)
end
-- Record a hit on [[Wiktionary:Tracking/etymology/PAGE]] for debug tracking,
-- and additionally on PAGE/CODE when `code` is given.
local function track(page, code)
	local base_page = "etymology/" .. page
	debug_track(base_page)
	if code then
		debug_track(base_page .. "/" .. code)
	end
end
--[==[
Join a list of formatted segments `segs` using the conjunction `conj`.
`conj` may be {"and"} or {"or"} (serial-comma joining via [[Module:table]]),
{","} or {";"} (the punctuation followed by a space), {"/"} (no surrounding
spaces) or {"~"} (surrounded by spaces). A single segment is returned as-is,
so `conj` is only required when there are two or more segments. Throws an
internal error on a missing or unrecognized conjunction.
]==]
local function join_segs(segs, conj)
	if not segs[2] then
		return segs[1]
	elseif conj == "and" or conj == "or" then
		return serial_comma_join(segs, {conj = conj})
	end
	local sep
	if conj == "," or conj == ";" then
		sep = conj .. " "
	elseif conj == "/" then
		sep = "/"
	elseif conj == "~" then
		sep = " ~ "
	elseif conj then
		error(("Internal error: Unrecognized conjunction \"%s\""):format(conj))
	else
		-- Fixed: this message has no format placeholders, so the previous
		-- `:format(conj)` call (with `conj` necessarily nil/false here) was
		-- dead code.
		error("Internal error: No value supplied for conjunction")
	end
	return concat(segs, sep)
end
-- True when `lang` is `source` itself (same language code) or a variety of
-- it (i.e. `source` is its parent).
local function lang_is_source(lang, source)
	if lang:getCode() == source:getCode() then
		return true
	end
	return lang:hasParent(source)
end
--[==[
Format one or more links as specified in `termobjs`, a list of term objects of the format accepted by `full_link()` in
[[Module:links]], additionally with optional qualifiers, labels and references. `conj` is used to join multiple terms
and must be specified if there is more than one term. `template_name` is the template name used in debug tracking and
must be specified. Optional `sourcetext` is text to prepend to the concatenated terms, separated by a space if the
concatenated terms are non-empty (which is always the case unless there is a single term with the value "-"). If
`qualifiers_labels_on_outside` is given, any qualifiers, labels or references specified in the first term go on the
outside of (i.e. before) `sourcetext`; otherwise they will end up on the inside.
]==]
function export.format_links(termobjs, conj, template_name, sourcetext, qualifiers_labels_on_outside)
if not template_name then
error("Internal error: Must specify `template_name` to format_links()")
end
for i, termobj in ipairs(termobjs) do
-- Families (and substrate "languages", family code "qfa-sub") cannot have
-- linkable terms; force the term to "-" so only the language displays.
if termobj.lang:hasType("family") or termobj.lang:getFamilyCode() == "qfa-sub" then
if termobj.term and termobj.term ~= "-" then
debug_track(template_name .. "/family-with-term")
end
termobj.term = "-"
end
if termobj.term == "-" then
--[=[
[[Special:WhatLinksHere/Wiktionary:Tracking/cognate/no-term]]
[[Special:WhatLinksHere/Wiktionary:Tracking/derived/no-term]]
[[Special:WhatLinksHere/Wiktionary:Tracking/borrowed/no-term]]
[[Special:WhatLinksHere/Wiktionary:Tracking/calque/no-term]]
]=]
debug_track(template_name .. "/no-term")
-- No link to format: the first slot still carries `sourcetext` (or nil,
-- coerced to "" by join_segs's concatenation path); later slots collapse
-- to the empty string.
termobjs[i] = i == 1 and sourcetext or ""
else
if i == 1 and qualifiers_labels_on_outside and sourcetext then
-- Move `sourcetext` inside the term object as pretext so that any
-- qualifiers/labels/references end up outside (before) it; clear the
-- variable so it is not prepended a second time below.
termobj.pretext = sourcetext .. " "
sourcetext = nil
end
-- NOTE: `termobjs` is destructively overwritten in place — each term
-- object is replaced by its formatted link text.
termobjs[i] = (i == 1 and sourcetext and sourcetext .. " " or "") ..
full_link(termobj, "term", nil, "show qualifiers")
end
end
return join_segs(termobjs, conj)
end
--[==[
Return the display text and category name for a source language `source`.
Special-cases the codes "und" (undetermined), "mul" (translingual) and
"mul-tax" (taxonomic names); all other languages use their canonical name (if
`raw`) or a Wikipedia link, with the display form as the category name.
]==]
function export.get_display_and_cat_name(source, raw)
	local code = source:getCode()
	if code == "und" then
		return "undetermined", "other languages"
	elseif code == "mul" then
		return raw and "translingual" or "[[w:Translingualism|translingual]]", "Translingual"
	elseif code == "mul-tax" then
		return raw and "taxonomic name" or "[[w:Biological nomenclature|taxonomic name]]", "taxonomic names"
	end
	local display = raw and source:getCanonicalName() or source:makeWikipediaLink()
	return display, source:getDisplayForm()
end
--[==[
Compute the display form for `data.source` and, when `data.lang` is present
and categorization is not suppressed, append the appropriate derivation
category to `data.categories` (created if nil). Returns the display form and
the (possibly new) category list.
]==]
function export.insert_source_cat_get_display(data)
	local source = data.source
	local display, cat_name = export.get_display_and_cat_name(source, data.raw)
	local lang, categories = data.lang, data.categories
	if lang and not data.nocat then
		-- Add the category, but only if there is a current language.
		categories = categories or {}
		local suffix
		if lang_is_source(lang, source) then
			-- When `lang` is `source` or an etym-only variety of it (checked
			-- against the parent full language too), use "borrowed back into"
			-- instead of e.g. the nonsensical "Latin terms derived from Latin".
			suffix = " terms borrowed back into " .. cat_name
		else
			suffix = " " .. (data.borrowing_type or "terms derived") .. " from " .. cat_name
		end
		insert(categories, lang:getFullName() .. suffix)
	end
	return display, categories
end
--[==[
Format a single source language as a `<span class="etyl">` containing its
display form plus any formatted derivation categories. `data` takes the same
fields as `format_sources()`.
]==]
function export.format_source(data)
	local sort_key = data.sort_key
	if sort_key then
		-- [[Special:WhatLinksHere/Wiktionary:Tracking/etymology/sortkey]]
		track("sortkey")
	end
	local display, categories = export.insert_source_cat_get_display(data)
	local cat_text = ""
	-- Format categories, but only if there is a current language; {{cog}}
	-- currently gets no categories.
	if data.lang and not data.nocat then
		cat_text = format_categories(categories, data.lang, sort_key, nil, data.force_cat or force_cat)
	end
	return "<span class=\"etyl\">" .. display .. cat_text .. "</span>"
end
--[==[
Format sources for etymology templates such as {{tl|bor}}, {{tl|der}}, {{tl|inh}} and {{tl|cog}}. There may potentially
be more than one source language (except currently {{tl|inh}}, which doesn't support it because it doesn't really
make sense). In that case, all but the last source language is linked to the first term, but only if there is such a
term and this linking makes sense, i.e. either (1) the term page exists after stripping diacritics according to the
source language in question, or (2) the result of stripping diacritics according to the source language in question
results in a different page from the same process applied with the last source language. For example, {{m|ru|соля́нка}}
will link to [[солянка]] but {{m|en|соля́нка}} will link to [[соля́нка]] with an accent, and since they are different
pages, the use of English as a non-final source with term 'соля́нка' will link to [[соля́нка]] even though it doesn't
exist, on the assumption that it is merely a redlink that might exist. If none of the above criteria apply, a non-final
source language will be linked to the Wikipedia entry for the language, just as final source languages always are.
`data` contains the following fields:
* `lang`: The destination language object into which the terms were borrowed, inherited or otherwise derived. Used for
categorization and can be nil, as with {{tl|cog}}.
* `sources`: List of source objects. Most commonly there is only one. If there are multiple, the non-final ones are
handled specially; see above.
* `terms`: List of term objects. Most commonly there is only one. If there are multiple source objects as well as
multiple term objects, the non-final source objects link to the first term object.
* `sort_key`: Sort key for categories. Usually nil.
* `categories`: Categories to add to the page. Additional categories may be added to `categories` based on the source
languages ('''in which case `categories` is destructively modified'''). If `lang` is nil, no categories will be
added.
* `nocat`: Don't add any categories to the page.
* `sourceconj`: Conjunction used to separate multiple source languages. Defaults to {"and"}. Currently recognized
values are `and`, `or`, `,`, `;`, `/` and `~`.
* `borrowing_type`: Borrowing type used in categories, such as {"learned borrowings"}. Defaults to {"terms derived"}.
* `force_cat`: Force category generation on non-mainspace pages.
]==]
function export.format_sources(data)
local lang, sources, terms, borrowing_type, sort_key, categories, nocat =
data.lang, data.sources, data.terms, data.borrowing_type, data.sort_key, data.categories, data.nocat
local term1, sources_n, source_segs = terms[1], #sources, {}
local final_link_page
local term1_term, term1_sc = term1.term, term1.sc
-- With multiple sources, non-final ones may link directly to the first term;
-- precompute the page the *final* source would link to, for comparison below.
if sources_n > 1 and term1_term and term1_term ~= "-" then
final_link_page = get_link_page(term1_term, sources[sources_n], term1_sc)
end
for i, source in ipairs(sources) do
local seg, display_term
-- A non-final source is linked to the first term when either (1) stripping
-- diacritics per this source yields a different page than per the final
-- source, or (2) the resulting page actually exists (getContent() non-nil).
if i < sources_n and term1_term and term1_term ~= "-" then
local link_page = get_link_page(term1_term, source, term1_sc)
display_term = (link_page ~= final_link_page) or (link_page and not not new_title(link_page):getContent())
end
-- TODO: if the display forms or transliterations are different, display the terms separately.
if display_term then
-- Render the language name itself (passed as `alt`) as a link to the
-- term's page, with transliteration suppressed (tr = "-"); derivation
-- categories are still generated for this source.
local display, this_cats = export.insert_source_cat_get_display{
lang = lang,
source = source,
borrowing_type = borrowing_type,
raw = true,
categories = categories,
nocat = nocat,
}
seg = language_link {
lang = source,
term = term1_term,
alt = display,
tr = "-",
}
if lang and not nocat then
-- Format categories, but only if there is a current language; {{cog}} currently gets no categories
this_cats = format_categories(this_cats, lang, sort_key, nil, data.force_cat or force_cat)
else
this_cats = ""
end
seg = "<span class=\"etyl\">" .. seg .. this_cats .. "</span>"
else
-- Final source (or no linkable term): format_source() links the
-- language to its Wikipedia entry instead.
seg = export.format_source{
lang = lang,
source = source,
borrowing_type = borrowing_type,
sort_key = sort_key,
categories = categories,
nocat = nocat,
}
end
insert(source_segs, seg)
end
return join_segs(source_segs, data.sourceconj or "and")
end
-- Internal implementation of {{cognate}}/{{cog}} template. Delegates to
-- format_derived() with no destination language (`lang` omitted), so no
-- categories are generated.
function export.format_cognate(data)
	local derived_data = {
		sources = data.sources,
		terms = data.terms,
		sort_key = data.sort_key,
		sourceconj = data.sourceconj,
		conj = data.conj,
		template_name = "cognate",
		force_cat = data.force_cat,
	}
	return export.format_derived(derived_data)
end
--[==[
Internal implementation of {{derived}}/{{der}} template. This is called externally from [[Module:affix]],
[[Module:affixusex]] and [[Module:see]] and needs to support qualifiers, labels and references on the outside
of the sources for use by those modules.
`data` contains the following fields:
* `lang`: The destination language object into which the terms were derived. Used for categorization and can be nil, as
with {{tl|cog}}; in this case, no categories are added.
* `sources`: List of source objects. Most commonly there is only one. If there are multiple, the non-final ones are
handled specially; see `format_sources()`.
* `terms`: List of term objects. Most commonly there is only one. If there are multiple source objects as well as
multiple term objects, the non-final source objects link to the first term object.
* `conj`: Conjunction used to separate multiple terms. '''Required'''. Currently recognized values are `and`, `or`, `,`,
`;`, `/` and `~`.
* `sourceconj`: Conjunction used to separate multiple source languages. Defaults to {"and"}. Currently recognized
values are as for `conj` above.
* `qualifiers_labels_on_outside`: If specified, any qualifiers, labels or references in the first term in `terms` will
be displayed on the outside of (before) the source language(s) in `sources`. Normally this should be specified if
there is only one term possible in `terms`.
* `template_name`: Name of the template invoking this function. Must be specified. Only used for tracking pages.
* `sort_key`: Sort key for categories. Usually nil.
* `categories`: Categories to add to the page. Additional categories may be added to `categories` based on the source
languages ('''in which case `categories` is destructively modified'''). If `lang` is nil, no categories will be
added.
* `nocat`: Don't add any categories to the page.
* `borrowing_type`: Borrowing type used in categories, such as {"learned borrowings"}. Defaults to {"terms derived"}.
* `force_cat`: Force category generation on non-mainspace pages.
]==]
--[==[
Internal implementation of {{derived}}/{{der}}; the fields of `data` are
described in the comment block above.
]==]
function export.format_derived(data)
	local terms = data.terms
	-- The formatted source language(s) become the `sourcetext` prefix passed
	-- to format_links().
	local sourcetext = export.format_sources(data)
	return export.format_links(terms, data.conj, data.template_name, sourcetext,
		data.qualifiers_labels_on_outside)
end
-- Append "LANG terms borrowed from SOURCE" to `categories`. Skipped when
-- `lang` is `source` (or a variety of it): in that case format_source()
-- already inserts e.g. [[:Category:English terms borrowed back into English]]
-- rather than [[:Category:English terms borrowed from English]].
function export.insert_borrowed_cat(categories, lang, source)
	if lang_is_source(lang, source) then
		return
	end
	-- The second argument to get_display_and_cat_name() only affects the
	-- display form, which is discarded here; only the category name is used.
	local cat_name = select(2, export.get_display_and_cat_name(source, "raw"))
	insert(categories, lang:getFullName() .. " terms borrowed from " .. cat_name)
end
-- Internal implementation of {{borrowed}}/{{bor}} template.
function export.format_borrowed(data)
	-- Collect the "terms borrowed from" categories up front (unless
	-- categorization is suppressed).
	local categories = {}
	if not data.nocat then
		for _, source in ipairs(data.sources) do
			export.insert_borrowed_cat(categories, data.lang, source)
		end
	end
	-- Work on a shallow copy so the caller's `data` is left untouched.
	local copy = shallow_copy(data)
	copy.categories = categories
	local sourcetext = export.format_sources(copy)
	return export.format_links(copy.terms, copy.conj, "borrowed", sourcetext)
end
do
-- Generate the non-ancestor error message.
-- Renders `lang` as "NAME (code)", recursing into the regular parent for
-- etymology-only languages.
local function show_language(lang)
local retval = ("%s (%s)"):format(lang:makeCategoryLink(), lang:getCode())
if lang:hasType("etymology-only") then
retval = retval .. (" (an etymology-only language whose regular parent is %s)"):format(
show_language(lang:getParent()))
end
return retval
end
-- Check that `lang` has `otherlang` (which may be an etymology-only language) as an ancestor. Throw an error if
-- not. When `lang` is a family, verifies that `otherlang` is a language in that family.
function export.check_ancestor(lang, otherlang)
-- When `lang` is a family, verify `otherlang` is in that family or in its parent family.
if lang.hasType and lang:hasType("family") then
local family_code = lang:getCode()
-- True if `other` belongs to the family with code `fcode`, checked via
-- inFamily() or, failing that, by direct family-code comparison. Method
-- existence is guarded since `other` may not support either.
local function in_family_code(fcode, other)
if not fcode or fcode == "" then return false end
if other.inFamily and other:inFamily(fcode) then return true end
if other.getFamilyCode and other:getFamilyCode() == fcode then return true end
return false
end
local in_family = in_family_code(family_code, otherlang)
if not in_family then
-- Not directly in the family: determine the parent family's code,
-- either via getParent() or by stripping the last hyphen-separated
-- component of the family code, and retry against that.
local parent_code
if lang.getParent then
local parent_family = lang:getParent()
if parent_family and parent_family.getCode then
parent_code = parent_family:getCode()
end
end
if not parent_code and family_code:find("-", 1, true) then
parent_code = family_code:match("^(.+)-[^-]+$")
end
if parent_code then
in_family = in_family_code(parent_code, otherlang)
end
end
if not in_family then
local other_display = (otherlang.getCanonicalName and otherlang:getCanonicalName()) or (otherlang.getCode and otherlang:getCode()) or tostring(otherlang)
local fam_display = (lang.getCanonicalName and lang:getCanonicalName()) or family_code
error(("%s is not in family %s; inherited ancestor under a family must be a language in that family or its parent family.")
:format(other_display, fam_display))
end
return
end
-- FIXME: I don't know if this function works correctly with etym-only languages in `lang`. I have fixed up
-- the module link code appropriately (June 2024) but the remaining logic is untouched.
if lang:hasAncestor(otherlang) then
-- [[Special:WhatLinksHere/Wiktionary:Tracking/etymology/variety]]
-- Track inheritance from varieties of Latin that shouldn't have any descendants (everything except Old Latin, Classical Latin and Vulgar Latin).
if otherlang:getFullCode() == "la" then
otherlang = otherlang:getCode()
if not (otherlang == "itc-ola" or otherlang == "la-cla" or otherlang == "la-vul") then
track("bad ancestor", otherlang)
end
end
return
end
-- Not an ancestor: build a detailed error listing the actual ancestor(s)
-- and the data module(s) where the ancestry is defined.
-- NOTE(review): `postscript` captures getAncestors()'s second return value
-- but is unconditionally overwritten below, so that value is unused.
local ancestors, postscript = lang:getAncestors()
local etym_module_link = lang:hasType("etymology-only") and "[[Module:etymology languages/data]] or " or ""
local module_link = "[[" .. get_language_data_module_name(lang:getFullCode()) .. "]]"
if not ancestors[1] then
postscript = show_language(lang) .. " has no ancestors."
else
local ancestor_list = {}
for _, ancestor in ipairs(ancestors) do
insert(ancestor_list, show_language(ancestor))
end
postscript = ("The ancestor%s of %s %s %s."):format(
ancestors[2] and "s" or "", lang:getCanonicalName(),
ancestors[2] and "are" or "is", concat(ancestor_list, " and "))
end
error(("%s is not set as an ancestor of %s in %s%s. %s")
:format(show_language(otherlang), show_language(lang), etym_module_link, module_link, postscript))
end
end
-- Internal implementation of {{inherited}}/{{inh}} template. The source
-- language is taken from the first term and must be an ancestor of `lang`
-- (check_ancestor() throws a detailed error otherwise).
function export.format_inherited(data)
	local terms = data.terms
	local source = terms[1].lang
	-- "LANG terms inherited from SOURCE" category, unless suppressed.
	local categories = {}
	if not data.nocat then
		insert(categories, data.lang:getFullName() .. " terms inherited from " .. source:getCanonicalName())
	end
	export.check_ancestor(data.lang, source)
	-- Copy `data` so the caller's table is not modified.
	local copy = shallow_copy(data)
	copy.categories = categories
	copy.source = source
	return export.format_links(terms, copy.conj, "inherited", export.format_source(copy))
end
-- Internal implementation of "misc variant" templates such as {{abbrev}}, {{clipping}}, {{reduplication}} and the like.
-- Fields of `data` used here: `lang` (destination language), `text` (leading
-- text, e.g. "Clipping"), `oftext` (defaults to "of"), `notext`/`nocat`
-- (suppression flags), `terms` (list of term objects), `conj` (conjunction
-- for multiple terms), `cats` (category suffixes, each prefixed with the
-- language's full name), `sort_key` and `force_cat`.
function export.format_misc_variant(data)
	local lang, notext, terms, cats, parts = data.lang, data.notext, data.terms, data.cats, {}
	if not notext then
		insert(parts, data.text)
	end
	if terms[1] then
		if not notext then
			-- FIXME: If term is given as '-', we should consider displaying just "Clipping" not "Clipping of".
			insert(parts, " " .. (data.oftext or "of"))
		end
		local termparts = {}
		-- Make links out of all the parts.
		for _, termobj in ipairs(terms) do
			local result
			if termobj.lang then
				-- Term carries its own (source) language: format as a derivation.
				result = export.format_derived {
					lang = lang,
					terms = {termobj},
					sources = termobj.termlangs or {termobj.lang},
					template_name = "misc_variant",
					qualifiers_labels_on_outside = true,
					force_cat = data.force_cat,
				}
			else
				-- No explicit language: link the term in the destination language.
				termobj.lang = lang
				result = export.format_links({termobj}, nil, "misc_variant")
			end
			-- Fixed: use the file-local `insert` for consistency with the rest of
			-- this module (previously a global `table.insert` lookup).
			insert(termparts, result)
		end
		local linktext = join_segs(termparts, data.conj)
		if not notext and linktext ~= "" then
			insert(parts, " ")
		end
		insert(parts, linktext)
	end
	local categories = {}
	if not data.nocat and cats then
		for _, cat in ipairs(cats) do
			insert(categories, lang:getFullName() .. " " .. cat)
		end
	end
	if categories[1] then
		insert(parts, format_categories(categories, lang, data.sort_key, nil, data.force_cat or force_cat))
	end
	return concat(parts)
end
-- Implementation of miscellaneous templates such as {{unknown}} and {{onomatopoeia}} that have no associated terms.
-- Emits `data.title` (unless `notext`) followed by the single category
-- "LANG CAT" built from `data.lang` and `data.cat` (unless `nocat` or no cat).
function export.format_misc_variant_no_term(data)
	local pieces = {}
	if not data.notext then
		pieces[#pieces + 1] = data.title
	end
	if data.cat and not data.nocat then
		local lang = data.lang
		local cats = {lang:getFullName() .. " " .. data.cat}
		pieces[#pieces + 1] = format_categories(cats, lang, data.sort_key, nil, data.force_cat or force_cat)
	end
	return concat(pieces)
end
return export
719oqh4cehi6zb4y8xnbsb9vtnae84e
মডিউল:names
828
58828
507780
507773
2026-04-13T19:34:01Z
Redmin
6857
[[Special:Contributions/Redmin|Redmin]] ([[User talk:Redmin|আলাপ]])-এর সম্পাদিত [[Special:Diff/507773|507773]] নম্বর সংশোধনটি বাতিল করা হয়েছে
507780
Scribunto
text/plain
local export = {}
local m_languages = require("Module:languages")
local m_links = require("Module:links")
local m_utilities = require("Module:utilities")
local m_str_utils = require("Module:string utilities")
local m_table = require("Module:table")
local en_utilities_module = "Module:en-utilities"
local parameter_utilities_module = "Module:parameter utilities"
local parse_interface_module = "Module:parse interface"
local parse_utilities_module = "Module:parse utilities"
local pron_qualifier_module = "Module:pron qualifier"
local enlang = m_languages.getByCode("en")
local rsubn = m_str_utils.gsub
local rsplit = m_str_utils.split
local u = m_str_utils.char
-- gsub wrapper that discards the substitution count, returning only the
-- resulting string. (The extra parentheses truncate to one return value.)
local function rsub(str, from, to)
return (rsubn(str, from, to))
end
-- Private-use placeholder character (U+FFF2).
-- NOTE(review): the name suggests it temporarily stands in for "<", but its
-- use is not visible in this chunk — confirm against later code.
local TEMP_LESS_THAN = u(0xFFF2)
local force_cat = false -- for testing
--[=[
FIXME:
1. from=the Bible (DONE)
2. origin=18th century [DONE]
3. popular= (DONE)
4. varoftype= (DONE)
5. eqtype= [DONE]
6. dimoftype= [DONE]
7. from=de:Elisabeth (same language) (DONE)
8. blendof=, blendof2= [DONE]
9. varform, dimform [DONE]
10. from=English < Latin [DONE]
11. usage=rare -> categorize as rare?
12. dimeq= (also vareq=?) [DONE]
13. fromtype= [DONE]
14. <tr:...> and similar params [DONE]
]=]
-- Used in category code; name types which are full-word end-matching substrings of longer name types (e.g. "পদবি"
-- of "পুরুষ পদবি", but not "পুরুষ পদবি" of "মহিলা পদবি" because "পুরুষ" only matches a part of the word
-- "মহিলা") should follow the longer name.
export.personal_name_types = {
"পুরুষ পদবি", "মহিলা পদবি", "common-gender পদবি", "পদবি",
"patronymics", "matronymics",
}
-- Set form of the above list, for fast membership checks.
export.personal_name_type_set = m_table.listToSet(export.personal_name_types)
-- Recognized gender values for given names, mapped to properties:
-- `type` ("human" or "animal"); optional `cat` (explicit category list,
-- overriding the default built by get_given_name_cats()); optional `article`
-- and `track` fields (their use is not visible in this chunk —
-- presumably the indefinite article override and a debug-tracking flag).
-- NOTE(review): both English ("male"/"female") and Bengali ("পুরুষ"/"নারী")
-- keys coexist — confirm both spellings are meant to be accepted.
export.given_name_genders = {
male = {type = "human"},
female = {type = "human"},
["পুরুষ"] = {type = "human"},
["নারী"] = {type = "human"},
unisex = {type = "human", cat = {"পুরুষ প্রদত্ত নাম", "মহিলা প্রদত্ত নাম", "unisex প্রদত্ত নাম"}, article = "একটি"},
["unknown-gender"] = {type = "human", cat = {}, track = true},
animal = {type = "animal", track = true},
cat = {type = "animal"},
cow = {type = "animal"},
dog = {type = "animal"},
horse = {type = "animal"},
pig = {type = "animal"},
}
-- Return the category list for a given-name `gender` and its property table
-- `props`: an explicit `props.cat` wins; otherwise animal genders get
-- "GENDER names" and human genders get "GENDER প্রদত্ত নাম".
local function get_given_name_cats(gender, props)
	if props.cat then
		return props.cat
	end
	local suffix = props.type == "animal" and " names" or " প্রদত্ত নাম"
	return {gender .. suffix}
end
do
-- Register `cat` as a personal-name type (both list and set), skipping
-- duplicates so repeated categories across genders are added only once.
local function do_cat(cat)
if not export.personal_name_type_set[cat] then
export.personal_name_type_set[cat] = true
table.insert(export.personal_name_types, cat)
end
end
-- Seed the personal-name types with every given-name category plus its
-- diminutive and augmentative variants, and finally the generic
-- "প্রদত্ত নাম" ("given name") type.
for gender, props in pairs(export.given_name_genders) do
local cats = get_given_name_cats(gender, props)
for _, cat in ipairs(cats) do
do_cat("diminutives of " .. cat)
do_cat("augmentatives of " .. cat)
do_cat(cat)
end
end
do_cat("প্রদত্ত নাম")
end
-- List of name types; its consumer is not visible in this chunk.
-- NOTE(review): the entry "পুরুষ given name" mixes Bengali and English while
-- sibling entries use the fully translated "প্রদত্ত নাম" form — confirm
-- which spelling downstream code expects.
local translit_name_type_list = {
"পদবি", "পুরুষ given name", "মহিলা প্রদত্ত নাম", "unisex প্রদত্ত নাম",
"patronymic"
}
-- Record a hit on [[Wiktionary:Tracking/names/PAGE]] for debug tracking.
local function track(page)
require("Module:debug").track("names/" .. page)
end
-- Get raw text, for use in computing the indefinite article: strip wiki
-- markup via get_plaintext() in [[Module:utilities]], then drop parentheses
-- and brackets that may surround qualifier or label text preceding a term.
local function get_rawtext(text)
	local plain = m_utilities.get_plaintext(text)
	return (plain:gsub("[()%[%]]", ""))
end
--[=[
Parse a term and associated properties. This works with parameters of the form 'Karlheinz' or
'Kunigunde<q:medieval, now rare>' or 'non:Óláfr' or 'ru:Фру́нзе<tr:Frúnzɛ><q:rare>' where the modifying properties
are contained in <...> specifications after the term. `term` is the full parameter value including any angle brackets
and colons; `paramname` is the name of the parameter that this value comes from, for error purposes; `deflang` is a
language object used in the return value when the language isn't specified (e.g. in the examples 'Karlheinz' and
'Kunigunde<q:medieval, now rare>' above); `allow_explicit_lang` indicates whether the language can be explicitly given
(e.g. in the examples 'non:Óláfr' or 'ru:Фру́нзе<tr:Frúnzɛ><q:rare>' above).
Normally the return value is a terminfo object that can be passed to full_link() in [[Module:links]]), additionally
with optional fields `.q`, `.qq`, `.l`, `.ll`, `.refs` and `.eq` (a list of objects of the same form as the returned
terminfo object. However, if `allow_multiple_terms` is given, multiple comma-separated names can be given in `term`,
and the return value is a list of objects of the form described just above.
]=]
local function parse_term_with_annotations(term, paramname, deflang, allow_explicit_lang, allow_multiple_terms)
local param_mods = require(parameter_utilities_module).construct_param_mods {
{group = {"link", "l", "q", "ref"}},
-- <eq:...> carries equivalent name(s); these are parsed recursively, always
-- in English, without an explicit language prefix, and may be a
-- comma-separated list.
{param = "eq", convert = function(eqval, parse_err)
return parse_term_with_annotations(eqval, paramname .. ".eq", enlang, false, "allow multiple terms")
end},
}
-- Build the base term object, optionally splitting off a "LANG:" prefix
-- when explicit languages are allowed.
local function generate_obj(term, parse_err)
local termlang
if allow_explicit_lang then
local actual_term
-- NOTE(review): this passes the global `error` function as `parse_err`
-- rather than the `parse_err` argument received just above — confirm
-- that is intentional.
actual_term, termlang = require(parse_interface_module).parse_term_with_lang {
term = term,
parse_err = error,
paramname = paramname,
}
term = actual_term or term
end
return {
term = term,
lang = termlang or deflang,
}
end
return require(parse_interface_module).parse_inline_modifiers(term, {
param_mods = param_mods,
paramname = paramname,
generate_obj = generate_obj,
-- Only split on commas when multiple terms are allowed.
splitchar = allow_multiple_terms and "," or nil,
})
end
--[=[
Render one term as a link. When `do_language_link` is set and the term's
language is English, language_link() in [[Module:links]] is used; otherwise
full_link() is used. `termobj` is a terminfo object as produced by
parse_term_with_annotations(), possibly carrying the extra fields `.q`, `.qq`,
`.l`, `.ll`, `.refs` and `.eq` (the latter a list of nested terminfo objects).
]=]
local function link_one_term(termobj, do_language_link)
	-- language_link() is only appropriate for English terms.
	local use_language_link = do_language_link and termobj.lang:getCode() == "en"
	local link = use_language_link and m_links.language_link(termobj) or m_links.full_link(termobj)
	-- Wrap the link with left/right qualifiers, labels and references when
	-- any were supplied.
	local has_annotations = termobj.q and termobj.q[1] or termobj.qq and termobj.qq[1]
		or termobj.l and termobj.l[1] or termobj.ll and termobj.ll[1]
		or termobj.refs and termobj.refs[1]
	if has_annotations then
		link = require(pron_qualifier_module).format_qualifiers {
			lang = termobj.lang,
			text = link,
			q = termobj.q,
			qq = termobj.qq,
			l = termobj.l,
			ll = termobj.ll,
			refs = termobj.refs,
		}
	end
	-- Append " [=EQUIV or EQUIV]" for any English equivalents, recursing so
	-- each equivalent gets its own annotations.
	if termobj.eq then
		local eq_links = {}
		for i = 1, #termobj.eq do
			eq_links[i] = link_one_term(termobj.eq[i], true)
		end
		link = link .. " [=" .. m_table.serialCommaJoin(eq_links, {conj = "or"}) .. "]"
	end
	return link
end
--[=[
Link every term in `terms` and join the results with the conjunction `conj`
(default "or"). Joining normally goes through serialCommaJoin() in
[[Module:table]], which inserts the CSS spans that support an optional
"Oxford comma"; the special value ", " instead joins directly with commas.
With `include_langname`, the canonical name of the first term's language is
prepended. `do_language_link` is forwarded to link_one_term() so English
terms can use language_link(). Each element of `terms` is a terminfo object
from parse_term_with_annotations().
]=]
local function join_terms(terms, include_langname, do_language_link, conj)
	local langnametext
	local links = {}
	for i = 1, #terms do
		local termobj = terms[i]
		-- Capture the language name from the first term only.
		if include_langname and langnametext == nil then
			langnametext = termobj.lang:getCanonicalName() .. " "
		end
		links[i] = link_one_term(termobj, do_language_link)
	end
	local joined
	if conj == ", " then
		-- Plain comma join, bypassing the serial-comma machinery.
		joined = table.concat(links, ", ")
	else
		joined = m_table.serialCommaJoin(links, {conj = conj or "or"})
	end
	if langnametext then
		return langnametext .. joined
	end
	return joined
end
--[=[
Collect the names stored under `pname` in `args` (a single string or a list
gathered by [[Module:parameters]], e.g. all "mN" params in args["m"]), parse
each one (multiple comma-separated names per value are allowed), link them
and join with `conj` (default "or"; the special value ", " joins directly).
`lang` is the language used for linking; when nil, English is assumed and
language_link() is used for the links. With `allow_explicit_lang`, a name may
carry an explicit language prefix such as 'sv:Björn'.
Returns the joined text plus the number of names found.
]=]
local function join_names(lang, args, pname, conj, allow_explicit_lang)
	-- No language ⇒ the names are English and should be language-linked.
	local do_language_link = false
	if lang == nil then
		lang = enlang
		do_language_link = true
	end
	local termobjs = {}
	local function add_terms(term, i)
		local parsed = parse_term_with_annotations(term, pname .. (i == 1 and "" or i), lang,
			allow_explicit_lang, "allow multiple terms")
		for _, termobj in ipairs(parsed) do
			termobjs[#termobjs + 1] = termobj
		end
	end
	local vals = args[pname]
	if vals == nil then
		return "", 0
	elseif type(vals) == "table" then
		for i, term in ipairs(vals) do
			add_terms(term, i)
		end
	else
		add_terms(vals, 1)
	end
	return join_terms(termobjs, nil, do_language_link, conj), #termobjs
end
-- Build the "equivalent to ..." text from args.eq (a string or a list).
-- Consecutive equivalents in the same language are grouped into one segment
-- so that the language name is shown only once per run; segments are then
-- joined with "or".
local function get_eqtext(args)
	local eqsegs = {}
	local lastlang = nil
	local last_eqseg = {}
	local function process_one_term(term, i)
		for _, termobj in ipairs(parse_term_with_annotations(term, "eq" .. (i == 1 and "" or i), enlang,
			"allow explicit lang", "allow multiple terms")) do
			local termlang = termobj.lang:getCode()
			-- Language changed: close off the current segment if it has
			-- anything in it, and start a new one.
			if lastlang and lastlang ~= termlang then
				if #last_eqseg > 0 then
					table.insert(eqsegs, last_eqseg)
				end
				last_eqseg = {}
			end
			lastlang = termlang
			table.insert(last_eqseg, termobj)
		end
	end
	if type(args.eq) == "table" then
		for i, term in ipairs(args.eq) do
			process_one_term(term, i)
		end
	elseif type(args.eq) == "string" then
		process_one_term(args.eq, 1)
	end
	-- Flush the trailing segment.
	if #last_eqseg > 0 then
		table.insert(eqsegs, last_eqseg)
	end
	-- Render each same-language segment with its language name prepended,
	-- then join the segments.
	local eqtextsegs = {}
	for _, eqseg in ipairs(eqsegs) do
		table.insert(eqtextsegs, join_terms(eqseg, "include langname"))
	end
	return m_table.serialCommaJoin(eqtextsegs, {conj = "or"})
end
-- Build the "from ..." origin text from args.from (a string or a list of
-- comma-separated specs, each possibly a chain joined by space-delimited "<"
-- for "in turn from ..." derivations). Returns the formatted text plus a
-- list of category parts (language/family/name-type names) used by the
-- callers to build categories.
local function get_fromtext(lang, args)
	local catparts = {}
	local fromsegs = {}
	local i = 1
	-- Classify a single origin spec and return a display prefix and suffix.
	-- Recognized name-type keywords, "the Bible", explicit "lang:name"
	-- specs, language-family names and plain language names are all handled;
	-- anything else is tracked as unrecognized.
	local function parse_from(from)
		local unrecognized = false
		local prefix, suffix
		if from == "পদবি" or from == "প্রদত্ত নাম" or from == "nicknames" or from == "place names" or from == "common nouns" or from == "month names" then
			prefix = "transferred from the "
			-- Singularize the keyword for display ("nicknames" -> "nickname").
			suffix = from:gsub("s$", "")
			table.insert(catparts, from)
		elseif from == "patronymics" or from == "matronymics" or from == "coinages" then
			prefix = "originating "
			suffix = "as a " .. from:gsub("s$", "")
			table.insert(catparts, from)
		elseif from == "occupations" or from == "ethnonyms" then
			-- Same as above but these keywords take "an" rather than "a".
			prefix = "originating "
			suffix = "as an " .. from:gsub("s$", "")
			table.insert(catparts, from)
		elseif from == "the Bible" then
			prefix = "originating "
			suffix = "from the Bible"
			table.insert(catparts, from)
		else
			prefix = "from "
			if from:find(":") then
				-- "lang:name" spec: a source name in a (possibly different)
				-- language, with optional inline modifiers.
				local termobj = parse_term_with_annotations(from, "from" .. (i == 1 and "" or i), lang,
					"allow explicit lang")
				local fromlangname = ""
				if termobj.lang:getCode() ~= lang:getCode() then
					-- If name is derived from another name in the same language, don't include lang name after text
					-- "from " or create a category like "German পুরুষ প্রদত্ত নাম derived from German".
					local canonical_name = termobj.lang:getCanonicalName()
					fromlangname = canonical_name .. " "
					table.insert(catparts, canonical_name)
				end
				suffix = fromlangname .. link_one_term(termobj)
			else
				-- Family names like "Slavic languages"/"... lects" get a
				-- "the" article; bare language names are used as-is.
				local family = from:match("^(.+) languages$") or
					from:match("^.+ Languages$") or
					from:match("^.+ [Ll]ects$")
				if family then
					if require("Module:families").getByCanonicalName(family) then
						table.insert(catparts, from)
					else
						unrecognized = true
					end
					suffix = "the " .. from
				else
					if m_languages.getByCanonicalName(from, nil, "allow etym") then
						table.insert(catparts, from)
					else
						unrecognized = true
					end
					suffix = from
				end
			end
		end
		if unrecognized then
			track("unrecognized from")
			track("unrecognized from/" .. from)
		end
		return prefix, suffix
	end
	local last_fromseg = nil
	local put = require(parse_utilities_module)
	local from_args = args.from or {}
	if type(from_args) == "string" then
		from_args = {from_args}
	end
	while from_args[i] do
		-- We may have multiple comma-separated items, each of which may have multiple items separated by a
		-- space-delimited < sign, each of which may have inline modifiers with embedded commas in them. To handle
		-- this correctly, first replace space-delimited < signs with a special character, then split on balanced
		-- <...> and [...] signs, then split on comma, then rejoin the stuff between commas. We will then split on
		-- TEMP_LESS_THAN (the replacement for space-delimited < signs) and reparse.
		local rawfroms = rsub(from_args[i], "%s+<%s+", TEMP_LESS_THAN)
		local segments = put.parse_multi_delimiter_balanced_segment_run(rawfroms, {{"<", ">"}, {"[", "]"}})
		local comma_separated_groups = put.split_alternating_runs_on_comma(segments)
		for j, comma_separated_group in ipairs(comma_separated_groups) do
			comma_separated_groups[j] = table.concat(comma_separated_group)
		end
		for _, rawfrom in ipairs(comma_separated_groups) do
			local froms = rsplit(rawfrom, TEMP_LESS_THAN)
			if #froms == 1 then
				-- Simple (non-chained) origin: merge into the previous
				-- segment when the prefix matches, else start a new one.
				local prefix, suffix = parse_from(froms[1])
				if last_fromseg and (last_fromseg.has_multiple_froms or last_fromseg.prefix ~= prefix) then
					table.insert(fromsegs, last_fromseg)
					last_fromseg = nil
				end
				if not last_fromseg then
					last_fromseg = {prefix = prefix, suffixes = {}}
				end
				table.insert(last_fromseg.suffixes, suffix)
			else
				-- Chained origin ("X < Y < Z"): always its own segment,
				-- rendered as "X [in turn Y, in turn Z]".
				if last_fromseg then
					table.insert(fromsegs, last_fromseg)
					last_fromseg = nil
				end
				local first_suffixpart = ""
				local rest_suffixparts = {}
				for j, from in ipairs(froms) do
					local prefix, suffix = parse_from(from)
					if j == 1 then
						first_suffixpart = prefix .. suffix
					else
						table.insert(rest_suffixparts, prefix .. suffix)
					end
				end
				local full_suffix = first_suffixpart .. " [in turn " .. table.concat(rest_suffixparts, ", in turn ") .. "]"
				last_fromseg = {prefix = "", has_multiple_froms = true, suffixes = {full_suffix}}
			end
		end
		i = i + 1
	end
	table.insert(fromsegs, last_fromseg)
	-- Render each segment as "PREFIX suffix1 or suffix2 ..." and join.
	local fromtextsegs = {}
	for _, fromseg in ipairs(fromsegs) do
		table.insert(fromtextsegs, fromseg.prefix .. m_table.serialCommaJoin(fromseg.suffixes, {conj = "or"}))
	end
	return m_table.serialCommaJoin(fromtextsegs, {conj = "or"}), catparts
end
-- Parse the gender spec of {{given name}} (comma-separated gender keywords,
-- each optionally carrying inline modifiers such as <q:...>, <text:...> or
-- <article:...>). Returns a list of {type=..., props=...} objects plus a
-- flag saying whether the genders are animal genders. Mixing animal and
-- human genders is an error.
local function parse_given_name_genders(genderspec)
	-- Fast path: the whole spec is a single bare gender keyword.
	if export.given_name_genders[genderspec] then -- optimization
		return {{
			type = genderspec,
			props = export.given_name_genders[genderspec],
		}}, export.given_name_genders[genderspec].type == "animal"
	end
	local genders = {}
	local is_animal = nil
	local param_mods = require(parameter_utilities_module).construct_param_mods {
		{group = {"l", "q", "ref"}},
		{param = {"text", "article"}},
	}
	-- Validate one gender keyword; on failure, list all valid keywords in
	-- sorted order in the error message.
	local function generate_obj(term, parse_err)
		if not export.given_name_genders[term] then
			local valid_genders = {}
			for k, _ in pairs(export.given_name_genders) do
				table.insert(valid_genders, k)
			end
			table.sort(valid_genders)
			error(("Unrecognized gender '%s': valid genders are %s"):format(
				term, table.concat(valid_genders, ", ")))
		end
		return {
			type = term,
			props = export.given_name_genders[term],
		}
	end
	local retval = require(parse_interface_module).parse_inline_modifiers(genderspec, {
		param_mods = param_mods,
		paramname = "2",
		generate_obj = generate_obj,
		splitchar = ",",
	})
	-- All genders must agree on animal-ness.
	for _, spec in ipairs(retval) do
		local this_is_animal = spec.props.type == "animal"
		if is_animal == nil then
			is_animal = this_is_animal
		elseif is_animal ~= this_is_animal then
			error("Can't mix animal and human genders")
		end
	end
	return retval, is_animal
end
-- Format a parsed gender list (from parse_given_name_genders()) for display.
-- Returns the joined gender text and the indefinite article ("a"/"an" or a
-- per-gender override) to place before it.
local function generate_given_name_genders(lang, genders)
	local parts = {}
	for _, spec in ipairs(genders) do
		local text
		if spec.text then
			-- NOTE: This assumes no % sign in the gender type, which seems safe.
			text = spec.text:gsub("%+", spec.type)
		else
			if spec.props.type == "animal" then
				-- Animal genders (e.g. "mare") are linked to their entries.
				text = "[[" .. spec.type .. "]]"
			else
				text = spec.type
			end
		end
		-- Attach any qualifiers/labels/references given on this gender.
		if spec.q and spec.q[1] or spec.qq and spec.qq[1] or spec.l and spec.l[1] or spec.ll and spec.ll[1] or
			spec.refs and spec.refs[1] then
			text = require(pron_qualifier_module).format_qualifiers {
				lang = lang,
				text = text,
				q = spec.q,
				qq = spec.qq,
				l = spec.l,
				ll = spec.ll,
				refs = spec.refs,
				raw = true,
			}
		end
		table.insert(parts, text)
	end
	local retval = m_table.serialCommaJoin(parts, {conj = "or"})
	-- Article selection: explicit <article:...> wins; a per-gender default
	-- applies only when the first gender is displayed unmodified; otherwise
	-- compute "a"/"an" from the rendered text.
	local article = genders[1].article
	if not article and not genders[1].text and not genders[1].q and not genders[1].l then
		article = genders[1].props.article
	end
	if not article then
		article = require(en_utilities_module).get_indefinite_article(get_rawtext(retval))
	end
	return retval, article
end
-- The entry point for {{given name}}. Builds the definition text
-- ("A male প্রদত্ত নাম from ..., meaning ...") and the associated categories.
function export.given_name(frame)
	local parent_args = frame:getParent().args
	-- Old-style |lang= compatibility: with |lang=, numbered params shift down.
	local compat = parent_args.lang
	local offset = compat and 0 or 1
	local lang_index = compat and "lang" or 1
	local list = {list = true}
	local args = require("Module:parameters").process(parent_args, {
		[lang_index] = {required = true, type = "language", default = "und"},
		["gender"] = {default = "unknown-gender"},
		[1 + offset] = {alias_of = "gender"},
		["usage"] = true,
		["origin"] = true,
		["popular"] = true,
		["populartype"] = true,
		["meaning"] = list,
		["meaningtype"] = true,
		["addl"] = true,
		-- initial article: A or An
		["একটি"] = true,
		["sort"] = true,
		["from"] = true,
		[2 + offset] = {alias_of = "from"},
		["fromtype"] = true,
		["xlit"] = true,
		["eq"] = true,
		["eqtype"] = true,
		["varof"] = true,
		["varoftype"] = true,
		["var"] = {alias_of = "varof"},
		["vartype"] = {alias_of = "varoftype"},
		["varform"] = true,
		["varformtype"] = true,
		["dimof"] = true,
		["dimoftype"] = true,
		["dim"] = {alias_of = "dimof"},
		["dimtype"] = {alias_of = "dimoftype"},
		["dimform"] = true,
		["dimformtype"] = true,
		["augof"] = true,
		["augoftype"] = true,
		["aug"] = {alias_of = "augof"},
		["augtype"] = {alias_of = "augoftype"},
		["augform"] = true,
		["augformtype"] = true,
		["clipof"] = true,
		["clipoftype"] = true,
		["blend"] = true,
		["blendtype"] = true,
		["m"] = true,
		["mtype"] = true,
		["f"] = true,
		["ftype"] = true,
	})
	local textsegs = {}
	local lang = args[lang_index]
	local langcode = lang:getCode()
	-- Return "TYPE " for a *type param, or "" when it wasn't given.
	local function fetch_typetext(param)
		return args[param] and args[param] .. " " or ""
	end
	local genders, is_animal = parse_given_name_genders(args.gender)
	-- Pre-render all the linked-name pieces; each join_names() call also
	-- returns how many names it found.
	local dimoftext, numdimofs = join_names(lang, args, "dimof")
	local augoftext, numaugofs = join_names(lang, args, "augof")
	local xlittext = join_names(nil, args, "xlit")
	local blendtext = join_names(lang, args, "blend", "and")
	local varoftext = join_names(lang, args, "varof")
	local clipoftext = join_names(lang, args, "clipof")
	local mtext = join_names(lang, args, "m")
	local ftext = join_names(lang, args, "f")
	local varformtext, numvarforms = join_names(lang, args, "varform", ", ")
	local dimformtext, numdimforms = join_names(lang, args, "dimform", ", ")
	local augformtext, numaugforms = join_names(lang, args, "augform", ", ")
	local meaningsegs = {}
	for _, meaning in ipairs(args.meaning) do
		table.insert(meaningsegs, '“' .. meaning .. '”')
	end
	local meaningtext = m_table.serialCommaJoin(meaningsegs, {conj = "or"})
	local eqtext = get_eqtext(args)
	local function ins(txt)
		table.insert(textsegs, txt)
	end
	local dimoftype = args.dimoftype
	local augoftype = args.augoftype
	if numdimofs > 0 then
		ins((dimoftype and dimoftype .. " " or "") .. "[[diminutive]]" ..
			(xlittext ~= "" and ", " .. xlittext .. "," or "") .. " of the ")
	elseif numaugofs > 0 then
		ins((augoftype and augoftype .. " " or "") .. "[[augmentative]]" ..
			(xlittext ~= "" and ", " .. xlittext .. "," or "") .. " of the ")
	end
	-- FIX: the article-override parameter is declared as "একটি" above, so it
	-- must be read as args["একটি"]; the previous args.A could never be set
	-- because [[Module:parameters]] rejects undeclared parameters.
	local article = args["একটি"]
	if not article and textsegs[1] then
		article = require(en_utilities_module).get_indefinite_article(textsegs[1])
	end
	if not is_animal then
		local gendertext, gender_article = generate_given_name_genders(lang, genders)
		article = article or gender_article
		ins(gendertext)
		ins(" ")
	end
	ins((numdimofs > 1 or numaugofs > 1) and "[[প্রদত্ত নাম|প্রদত্ত নাম]]" or "[[প্রদত্ত নাম]]")
	article = article or "একটি" -- if no article set yet, it's "একটি" based on "প্রদত্ত নাম"
	if langcode == "en" then
		article = mw.getContentLanguage():ucfirst(article)
	end
	local need_comma = false
	if numdimofs > 0 then
		ins(" " .. dimoftext)
		need_comma = not is_animal
	elseif numaugofs > 0 then
		ins(" " .. augoftext)
		need_comma = not is_animal
	elseif xlittext ~= "" then
		ins(", " .. xlittext)
		need_comma = true
	end
	if is_animal then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" for ")
		local gendertext, gender_article = generate_given_name_genders(lang, genders)
		ins(gender_article)
		ins(" ")
		ins(gendertext)
	end
	local from_catparts = {}
	if args.from then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("fromtype"))
		local textseg, this_catparts = get_fromtext(lang, args)
		for _, catpart in ipairs(this_catparts) do
			m_table.insertIfNot(from_catparts, catpart)
		end
		ins(textseg)
	end
	if meaningtext ~= "" then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("meaningtype") .. "meaning " .. meaningtext)
	end
	if args.origin then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.origin .. " origin")
	end
	if args.usage then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.usage .. " usage")
	end
	if varoftext ~= "" then
		ins(", " .. fetch_typetext("varoftype") .. "variant of " .. varoftext)
	end
	if clipoftext ~= "" then
		ins(", " .. fetch_typetext("clipoftype") .. "clipping of " .. clipoftext)
	end
	if blendtext ~= "" then
		ins(", " .. fetch_typetext("blendtype") .. "blend of " .. blendtext)
	end
	if args.popular then
		ins(", " .. fetch_typetext("populartype") .. "popular " .. args.popular)
	end
	if mtext ~= "" then
		ins(", " .. fetch_typetext("mtype") .. "masculine equivalent " .. mtext)
	end
	if ftext ~= "" then
		ins(", " .. fetch_typetext("ftype") .. "feminine equivalent " .. ftext)
	end
	if eqtext ~= "" then
		ins(", " .. fetch_typetext("eqtype") .. "equivalent to " .. eqtext)
	end
	-- addl= starting with ";" is appended verbatim; "_" suppresses the comma.
	if args.addl then
		if args.addl:find("^;") then
			ins(args.addl)
		elseif args.addl:find("^_") then
			ins(" " .. args.addl:sub(2))
		else
			ins(", " .. args.addl)
		end
	end
	-- FIX: `(numvarforms > 1)` etc. are booleans, and concatenating a boolean
	-- is a Lua runtime error ("attempt to concatenate a boolean value"). The
	-- intent (cf. the English original) is to pluralize "form" when there is
	-- more than one form.
	if varformtext ~= "" then
		ins("; " .. fetch_typetext("varformtype") .. "variant form" .. (numvarforms > 1 and "s" or "") .. " " ..
			varformtext)
	end
	if dimformtext ~= "" then
		ins("; " .. fetch_typetext("dimformtype") .. "diminutive form" .. (numdimforms > 1 and "s" or "") .. " " ..
			dimformtext)
	end
	if augformtext ~= "" then
		ins("; " .. fetch_typetext("augformtype") .. "augmentative form" .. (numaugforms > 1 and "s" or "") .. " " ..
			augformtext)
	end
	textsegs = "<span class='use-with-mention'>" .. article .. " " .. table.concat(textsegs) .. "</span>"
	local categories = {}
	local langname = lang:getCanonicalName() .. " "
	-- Insert the gender-derived categories, optionally prefixed with
	-- "diminutives of "/"augmentatives of ", each crossed with the
	-- from-derived category parts.
	local function insert_cats(dimaugof)
		if dimaugof == "" and genders[1].props.type == "human" then
			-- No category such as "English diminutives of প্রদত্ত নাম"
			table.insert(categories, langname .. "প্রদত্ত নাম")
		end
		local function insert_cat(cat)
			table.insert(categories, langname .. dimaugof .. cat)
			for _, catpart in ipairs(from_catparts) do
				table.insert(categories, langname .. dimaugof .. cat .. " from " .. catpart)
			end
		end
		for _, spec in ipairs(genders) do
			local typ = spec.type
			if spec.props.track then
				track(typ)
			end
			local cats = get_given_name_cats(spec.type, spec.props)
			for _, cat in ipairs(cats) do
				insert_cat(cat)
			end
		end
	end
	insert_cats("")
	if numdimofs > 0 then
		insert_cats("diminutives of ")
	elseif numaugofs > 0 then
		insert_cats("augmentatives of ")
	end
	return textsegs .. m_utilities.format_categories(categories, lang, args.sort, nil, force_cat)
end
-- The entry point for {{surname}}, {{patronymic}} and {{matronymic}}.
-- Builds the definition text and the gender-specific categories.
function export.surname(frame)
	local iargs = require("Module:parameters").process(frame.args, {
		["type"] = {required = true, set = {"পদবি", "patronymic", "matronymic"}},
	})
	local parent_args = frame:getParent().args
	-- Old-style |lang= compatibility: with |lang=, numbered params shift down.
	local compat = parent_args.lang
	local offset = compat and 0 or 1
	if parent_args.dot or parent_args.nodot then
		error("dot= and nodot= are no longer supported in [[Template:" .. iargs.type .. "]] because a trailing " ..
			"period is no longer added by default; if you want it, add it explicitly after the template")
	end
	local lang_index = compat and "lang" or 1
	local list = {list = true}
	-- {{surname}} takes gender in |g= and the adjective in |1=; the
	-- patronymic/matronymic templates take gender in |1= and adjective in |2=.
	local gender_arg = iargs.type == "পদবি" and "g" or 1 + offset
	local adj_arg = iargs.type == "পদবি" and 1 + offset or 2 + offset
	local args = require("Module:parameters").process(parent_args, {
		[lang_index] = {required = true, type = "language", template_default = "und"},
		[gender_arg] = iargs.type == "পদবি" and true or {required = true, template_default = "unknown"}, -- gender(s)
		[adj_arg] = true, -- adjective/qualifier
		["usage"] = true,
		["origin"] = true,
		["popular"] = true,
		["populartype"] = true,
		["meaning"] = list,
		["meaningtype"] = true,
		["parent"] = true,
		["addl"] = true,
		-- initial article: by default A or An (English), a or an (otherwise)
		["একটি"] = true,
		["sort"] = true,
		["from"] = true,
		["fromtype"] = true,
		["xlit"] = true,
		["eq"] = true,
		["eqtype"] = true,
		["varof"] = true,
		["varoftype"] = true,
		["var"] = {alias_of = "varof"},
		["vartype"] = {alias_of = "varoftype"},
		["varform"] = true,
		["varformtype"] = true,
		["clipof"] = true,
		["clipoftype"] = true,
		["blend"] = true,
		["blendtype"] = true,
		["m"] = true,
		["mtype"] = true,
		["f"] = true,
		["ftype"] = true,
		["nocat"] = {type = "boolean"},
	})
	local textsegs = {}
	local lang = args[lang_index]
	local langcode = lang:getCode()
	-- Return "TYPE " for a *type param, or "" when it wasn't given.
	local function fetch_typetext(param)
		return args[param] and args[param] .. " " or ""
	end
	-- Normalize the comma-separated gender list; saw_male/saw_female drive
	-- the "son"/"daughter" wording for parent= below.
	local saw_male = false
	local saw_female = false
	local genders = {}
	if args[gender_arg] then
		for _, g in ipairs(require(parse_interface_module).split_on_comma(args[gender_arg])) do
			if g == "unknown" or g == "unknown gender" or g == "unknown-gender" or g == "?" then
				g = "unknown-gender"
				track("unknown gender")
			elseif g == "unisex" or g == "common gender" or g == "common-gender" or g == "c" then
				g = "common-gender"
				saw_male = true
				saw_female = true
			elseif g == "m" or g == "পুরুষ" then -- FIX: removed duplicated "পুরুষ" comparison
				g = "পুরুষ"
				saw_male = true
			elseif g == "f" or g == "মহিলা" or g == "নারী" then
				g = "মহিলা"
				saw_female = true
			else
				error("Unrecognized gender: " .. g)
			end
			table.insert(genders, g)
		end
	end
	local adj = args[adj_arg]
	local xlittext = join_names(nil, args, "xlit")
	local blendtext = join_names(lang, args, "blend", "and")
	local varoftext = join_names(lang, args, "varof")
	local clipoftext = join_names(lang, args, "clipof")
	local mtext = join_names(lang, args, "m")
	local ftext = join_names(lang, args, "f")
	local parenttext = join_names(lang, args, "parent", nil, "allow explicit lang")
	local varformtext, numvarforms = join_names(lang, args, "varform", ", ")
	local meaningsegs = {}
	for _, meaning in ipairs(args.meaning) do
		table.insert(meaningsegs, '“' .. meaning .. '”')
	end
	if parenttext ~= "" then
		local child = saw_male and not saw_female and "son" or saw_female and not saw_male and "daughter" or
			"son/daughter"
		table.insert(meaningsegs, ("“%s of %s”"):format(child, parenttext))
	end
	local meaningtext = m_table.serialCommaJoin(meaningsegs, {conj = "or"})
	local eqtext = get_eqtext(args)
	local function ins(txt)
		table.insert(textsegs, txt)
	end
	ins("<span class='use-with-mention'>")
	-- If gender is supplied, it goes before the specified adjective in adj=. The only value of gender that uses "an" is
	-- "unknown-gender" (note that "unisex" wouldn't use it but in any case we map "unisex" to "common-gender"). If gender
	-- isn't supplied, look at the first letter of the value of adj= if supplied; otherwise, the article is always "একটি"
	-- because the word "পদবি", "patronymic" or "matronymic" follows. Capitalize "একটি"/"An" if English.
	local article
	-- FIX: the article-override parameter is declared as "একটি" above, so it
	-- must be read as args["একটি"]; the previous args.A could never be set
	-- because [[Module:parameters]] rejects undeclared parameters.
	if args["একটি"] then
		article = args["একটি"]
	else
		article = #genders > 0 and genders[1] == "unknown-gender" and "an" or
			#genders == 0 and adj and require(en_utilities_module).get_indefinite_article(adj) or
			"একটি"
		if langcode == "en" then
			article = mw.getContentLanguage():ucfirst(article)
		end
	end
	ins(article .. " ")
	if #genders > 0 then
		ins(table.concat(genders, " or ") .. " ")
	end
	if adj then
		ins(adj .. " ")
	end
	ins("[[" .. iargs.type .. "]]")
	local need_comma = false
	if xlittext ~= "" then
		ins(", " .. xlittext)
		need_comma = true
	end
	local from_catparts = {}
	if args.from then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("fromtype"))
		local textseg, this_catparts = get_fromtext(lang, args)
		for _, catpart in ipairs(this_catparts) do
			m_table.insertIfNot(from_catparts, catpart)
		end
		ins(textseg)
	end
	if meaningtext ~= "" then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("meaningtype") .. "meaning " .. meaningtext)
	end
	if args.origin then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.origin .. " origin")
	end
	if args.usage then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.usage .. " usage")
	end
	if varoftext ~= "" then
		ins(", " .. fetch_typetext("varoftype") .. "variant of " .. varoftext)
	end
	if clipoftext ~= "" then
		ins(", " .. fetch_typetext("clipoftype") .. "clipping of " .. clipoftext)
	end
	if blendtext ~= "" then
		ins(", " .. fetch_typetext("blendtype") .. "blend of " .. blendtext)
	end
	if args.popular then
		ins(", " .. fetch_typetext("populartype") .. "popular " .. args.popular)
	end
	if mtext ~= "" then
		ins(", " .. fetch_typetext("mtype") .. "masculine equivalent " .. mtext)
	end
	if ftext ~= "" then
		ins(", " .. fetch_typetext("ftype") .. "feminine equivalent " .. ftext)
	end
	if eqtext ~= "" then
		ins(", " .. fetch_typetext("eqtype") .. "equivalent to " .. eqtext)
	end
	-- addl= starting with ";" is appended verbatim; "_" suppresses the comma.
	if args.addl then
		if args.addl:find("^;") then
			ins(args.addl)
		elseif args.addl:find("^_") then
			ins(" " .. args.addl:sub(2))
		else
			ins(", " .. args.addl)
		end
	end
	-- FIX: `(numvarforms > 1)` is a boolean and concatenating it is a Lua
	-- runtime error; pluralize "form" explicitly instead.
	if varformtext ~= "" then
		ins("; " .. fetch_typetext("varformtype") .. "variant form" ..
			(numvarforms > 1 and "s" or "") .. " " .. varformtext)
	end
	ins("</span>")
	local text = table.concat(textsegs, "")
	if args.nocat then
		return text
	end
	local categories = {}
	local langname = lang:getCanonicalName() .. " "
	-- Insert "LANG [GENDER ]TYPE" categories, crossed with the from-derived
	-- category parts.
	local function insert_cats(g)
		g = g and g .. " " or ""
		table.insert(categories, langname .. g .. iargs.type)
		for _, catpart in ipairs(from_catparts) do
			table.insert(categories, langname .. g .. iargs.type .. " from " .. catpart)
		end
	end
	insert_cats(nil)
	-- Per-gender categories; common-gender names also go into both the male
	-- and female categories, unknown-gender names into neither.
	local function insert_cats_gender(g)
		if g == "unknown-gender" then
			return
		end
		if g == "common-gender" then
			insert_cats_gender("পুরুষ")
			insert_cats_gender("মহিলা")
		end
		insert_cats(g)
	end
	for _, g in ipairs(genders) do
		insert_cats_gender(g)
	end
	return text .. m_utilities.format_categories(categories, lang, args.sort, nil, force_cat)
end
-- The entry point for {{name translit}}, {{name respelling}}, {{name obor}} and {{foreign name}}.
-- `frame.args` selects the variant: desctext= is the leading description
-- ("transliteration of", etc.), obor= marks orthographic borrowings and
-- foreign_name= selects the {{foreign name}} wording/layout.
function export.name_translit(frame)
	local boolean = {type = "boolean"}
	local iargs = require("Module:parameters").process(frame.args, {
		["desctext"] = {required = true},
		["obor"] = boolean,
		["foreign_name"] = boolean,
	})
	local parent_args = frame:getParent().args
	local params = {
		[1] = {required = true, type = "language", template_default = "en"},
		[2] = {required = true, type = "language", sublist = true, template_default = "ru"},
		[3] = {list = true, allow_holes = true},
		["type"] = {required = true, set = translit_name_type_list, sublist = true, default = "patronymic"},
		["dim"] = boolean,
		["aug"] = boolean,
		["nocap"] = boolean,
		["addl"] = true,
		["sort"] = true,
		["pagename"] = true,
	}
	local m_param_utils = require(parameter_utilities_module)
	local param_mods = m_param_utils.construct_param_mods {
		{group = {"link", "q", "l", "ref"}},
		{param = {"xlit", "eq"}},
	}
	-- `names` is the parsed list of name objects from |3=...; `args` holds
	-- the remaining processed parameters.
	local names, args = m_param_utils.parse_list_with_inline_modifiers_and_separate_params {
		params = params,
		param_mods = param_mods,
		raw_args = parent_args,
		termarg = 3,
		track_module = "names/name translit",
		disallow_custom_separators = true,
		-- Use the first source language as the language of the specified names.
		lang = function(args) return args[2][1] end,
		sc = "sc.default",
	}
	local lang = args[1]
	local sources = args[2]
	local pagename = args.pagename or mw.loadData("Module:headword/data").pagename
	local textsegs = {}
	local function ins(txt)
		table.insert(textsegs, txt)
	end
	ins("<span class='use-with-mention'>")
	-- Capitalize the description unless |nocap= was given.
	local desctext = iargs.desctext
	if not args.nocap then
		desctext = mw.getContentLanguage():ucfirst(desctext)
	end
	ins(desctext .. " ")
	if not iargs.foreign_name then
		ins("of ")
	end
	-- Build one segment per source language, possibly linking the language
	-- name to the name in that language.
	local langsegs = {}
	for i, source in ipairs(sources) do
		local sourcename = source:getCanonicalName()
		local function get_source_link()
			local term_to_link = names[1] and names[1].term or pagename
			-- We link the language name to either the first specified name or the pagename, in the following
			-- circumstances:
			-- (1) More than one language was given along with at least one name; or
			-- (2) We're handling {{foreign name}} or {{name obor}}, and no name was given.
			-- The reason for (1) is that if more than one language was given, we want a link to the name
			-- in each language, as the name that's displayed is linked only to the first specified language.
			-- However, if only one language was given, linking the language to the name is redundant.
			-- The reason for (2) is that {{foreign name}} is often used when the name in the destination language
			-- is spelled the same as the name in the source language (e.g. [[Clinton]] or [[Obama]] in Italian),
			-- and in that case no name will be explicitly specified but we still want a link to the name in the
			-- source language. The reason we restrict this to {{foreign name}} or {{name obor}}, not to
			-- {{name translit}} or {{name respelling}}, is that {{name translit}} and {{name respelling}} ought to be
			-- used for names spelled differently in the destination language (either transliterated or respelled), so
			-- assuming the pagename is the name in the source language is wrong.
			if names[1] and #sources > 1 or (iargs.foreign_name or iargs.obor) and not names[1] then
				return m_links.language_link{
					lang = sources[i], term = term_to_link, alt = sourcename, tr = "-"
				}
			else
				return sourcename
			end
		end
		if i == 1 and not iargs.foreign_name then
			-- If at least one name is given, we say "A transliteration of the LANG পদবি FOO", linking LANG to FOO.
			-- Otherwise we say "A transliteration of a LANG পদবি".
			if names[1] then
				table.insert(langsegs, "the " .. get_source_link())
			else
				table.insert(langsegs, require(en_utilities_module).add_indefinite_article(sourcename))
			end
		else
			table.insert(langsegs, get_source_link())
		end
	end
	local langseg_text = m_table.serialCommaJoin(langsegs, {conj = "or"})
	-- Optional diminutive/augmentative marker appended to the name type(s).
	local augdim_text
	if args.dim then
		augdim_text = " [[diminutive]]"
	elseif args.aug then
		augdim_text = " [[augmentative]]"
	else
		augdim_text = ""
	end
	local nametype_text = m_table.serialCommaJoin(args.type) .. augdim_text
	-- {{foreign name}} uses "TYPE in LANG, NAME"; the others use
	-- "... of the LANG TYPE NAME".
	if not iargs.foreign_name then
		ins(langseg_text .. " ")
		ins(nametype_text)
		if names[1] then
			ins(" ")
		end
	else
		ins(nametype_text)
		ins(" in " .. langseg_text)
		if names[1] then
			ins(", ")
		end
	end
	-- Render each name; qualifiers/labels/refs wrap the link, and xlit=/eq=
	-- modifiers append extra comma-separated material.
	local linked_names = {}
	local embedded_comma = false
	for _, name in ipairs(names) do
		local linked_name = m_links.full_link(name, "term")
		if name.q and name.q[1] or name.qq and name.qq[1] or name.l and name.l[1] or name.ll and name.ll[1] or
			name.refs and name.refs[1] then
			linked_name = require(pron_qualifier_module).format_qualifiers {
				lang = name.lang,
				text = linked_name,
				q = name.q,
				qq = name.qq,
				l = name.l,
				ll = name.ll,
				refs = name.refs,
				raw = true,
			}
		end
		if name.xlit then
			embedded_comma = true
			linked_name = linked_name .. ", " .. m_links.language_link { lang = enlang, term = name.xlit }
		end
		if name.eq then
			embedded_comma = true
			linked_name = linked_name .. ", equivalent to " .. m_links.language_link { lang = enlang, term = name.eq }
		end
		table.insert(linked_names, linked_name)
	end
	-- When a name carries embedded commas (from xlit=/eq=), use a stronger
	-- "; or of " separator to keep the names distinguishable.
	if embedded_comma then
		ins(table.concat(linked_names, "; or of "))
	else
		ins(m_table.serialCommaJoin(linked_names, {conj = "or"}))
	end
	-- addl= starting with ";" is appended verbatim; "_" suppresses the comma.
	if args.addl then
		if args.addl:find("^;") then
			ins(args.addl)
		elseif args.addl:find("^_") then
			ins(" " .. args.addl:sub(2))
		else
			ins(", " .. args.addl)
		end
	end
	ins("</span>")
	-- Category generation: "renderings of SOURCE TYPE" plus derived/borrowed
	-- categories per source language; unisex given names also populate the
	-- male and female variants.
	local categories = {}
	local function inscat(cat)
		table.insert(categories, lang:getFullName() .. " " .. cat)
	end
	for _, nametype in ipairs(args.type) do
		local function insert_cats(dimaugof)
			local function insert_cats_type(ty)
				if ty == "unisex প্রদত্ত নাম" then
					insert_cats_type("পুরুষ প্রদত্ত নাম")
					insert_cats_type("মহিলা প্রদত্ত নাম")
				end
				for _, source in ipairs(sources) do
					inscat("renderings of " .. source:getCanonicalName() .. " " .. dimaugof .. ty)
					inscat("terms derived from " .. source:getCanonicalName())
					inscat("terms borrowed from " .. source:getCanonicalName())
					if iargs.obor then
						inscat("orthographic borrowings from " .. source:getCanonicalName())
					end
					if source:getCode() ~= source:getFullCode() then
						-- etymology language
						inscat("renderings of " .. source:getFullName() .. " " .. dimaugof .. ty)
					end
				end
			end
			insert_cats_type(nametype)
		end
		insert_cats("")
		if args.dim then
			insert_cats("diminutives of ")
		end
		if args.aug then
			insert_cats("augmentatives of ")
		end
	end
	return table.concat(textsegs, "") .. m_utilities.format_categories(categories, lang, args.sort, nil, force_cat)
end
return export
21gbw7qfcvtfsx977h0t2cl7popua32
507793
507780
2026-04-14T06:50:27Z
Redmin
6857
[[en:Module:names|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507793
Scribunto
text/plain
local export = {}
local m_languages = require("Module:languages")
local m_links = require("Module:links")
local m_utilities = require("Module:utilities")
local m_str_utils = require("Module:string utilities")
local m_table = require("Module:table")
local en_utilities_module = "Module:en-utilities"
local parameter_utilities_module = "Module:parameter utilities"
local parse_interface_module = "Module:parse interface"
local parse_utilities_module = "Module:parse utilities"
local pron_qualifier_module = "Module:pron qualifier"
local enlang = m_languages.getByCode("en")
local rsubn = m_str_utils.gsub
local rsplit = m_str_utils.split
local u = m_str_utils.char
local function rsub(str, from, to)
return (rsubn(str, from, to))
end
local TEMP_LESS_THAN = u(0xFFF2)
local force_cat = false -- for testing
--[=[
FIXME:
1. from=the Bible (DONE)
2. origin=18th century [DONE]
3. popular= (DONE)
4. varoftype= (DONE)
5. eqtype= [DONE]
6. dimoftype= [DONE]
7. from=de:Elisabeth (same language) (DONE)
8. blendof=, blendof2= [DONE]
9. varform, dimform [DONE]
10. from=English < Latin [DONE]
11. usage=rare -> categorize as rare?
12. dimeq= (also vareq=?) [DONE]
13. fromtype= [DONE]
14. <tr:...> and similar params [DONE]
]=]
-- Used in category code; name types which are full-word end-matching substrings of longer name types (e.g. "surnames"
-- of "male surnames", but not "male surnames" of "female surnames" because "male" only matches a part of the word
-- "female") should follow the longer name.
export.personal_name_types = {
	"male surnames", "female surnames", "common-gender surnames", "surnames",
	"patronymics", "matronymics",
}
-- Set view of the list above for fast membership checks; both the list and the
-- set are extended with given-name categories in the `do` block further down.
export.personal_name_type_set = m_table.listToSet(export.personal_name_types)
-- Recognized gender values for given names. `type` distinguishes human from
-- animal names; `cat` (optional) overrides the default categories derived from
-- the gender (see get_given_name_cats()); `article` (optional) overrides the
-- computed indefinite article; `track` (optional) causes uses of that gender
-- to be tracked via [[Module:debug]].
export.given_name_genders = {
	male = {type = "human"},
	female = {type = "human"},
	unisex = {type = "human", cat = {"male given names", "female given names", "unisex given names"}, article = "a"},
	["unknown-gender"] = {type = "human", cat = {}, track = true},
	animal = {type = "animal", track = true},
	cat = {type = "animal"},
	cow = {type = "animal"},
	dog = {type = "animal"},
	horse = {type = "animal"},
	pig = {type = "animal"},
}
-- Return the list of category names for a given-name gender. `gender` is the
-- gender key (e.g. "male", "dog") and `props` its property table from
-- export.given_name_genders. An explicit `props.cat` list wins; otherwise a
-- single default category is derived from the gender label.
local function get_given_name_cats(gender, props)
	if props.cat then
		return props.cat
	end
	local suffix = props.type == "animal" and " names" or " given names"
	return {gender .. suffix}
end
do
	-- Register `cat` in export.personal_name_types / export.personal_name_type_set,
	-- skipping duplicates (several genders map to the same categories).
	local function do_cat(cat)
		if not export.personal_name_type_set[cat] then
			export.personal_name_type_set[cat] = true
			table.insert(export.personal_name_types, cat)
		end
	end
	-- Extend the personal-name-type registry with every given-name category
	-- plus its "diminutives of"/"augmentatives of" variants.
	-- NOTE: iteration order over given_name_genders is unspecified, so the
	-- order of the appended entries is too.
	for gender, props in pairs(export.given_name_genders) do
		local cats = get_given_name_cats(gender, props)
		for _, cat in ipairs(cats) do
			do_cat("diminutives of " .. cat)
			do_cat("augmentatives of " .. cat)
			do_cat(cat)
		end
	end
	do_cat("given names")
end
-- Name types accepted by the type= parameter of the name-transliteration
-- templates handled by export.name_translit().
local translit_name_type_list = {
	"surname", "male given name", "female given name", "unisex given name",
	"patronymic"
}
-- Record a tracking hit for `page` under the "names/" prefix via [[Module:debug]].
local function track(page)
	require("Module:debug").track("names/" .. page)
end
-- Reduce `text` to raw text for indefinite-article computation: strip wiki
-- markup via get_plaintext() in [[Module:utilities]], then drop parentheses
-- (which may wrap qualifier/label text before a term) and square brackets.
local function get_rawtext(text)
	local plain = m_utilities.get_plaintext(text)
	return (plain:gsub("[()%[%]]", ""))
end
--[=[
Parse a term and associated properties. This works with parameters of the form 'Karlheinz' or
'Kunigunde<q:medieval, now rare>' or 'non:Óláfr' or 'ru:Фру́нзе<tr:Frúnzɛ><q:rare>' where the modifying properties
are contained in <...> specifications after the term. `term` is the full parameter value including any angle brackets
and colons; `paramname` is the name of the parameter that this value comes from, for error purposes; `deflang` is a
language object used in the return value when the language isn't specified (e.g. in the examples 'Karlheinz' and
'Kunigunde<q:medieval, now rare>' above); `allow_explicit_lang` indicates whether the language can be explicitly given
(e.g. in the examples 'non:Óláfr' or 'ru:Фру́нзе<tr:Frúnzɛ><q:rare>' above).
Normally the return value is a terminfo object that can be passed to full_link() in [[Module:links]]), additionally
with optional fields `.q`, `.qq`, `.l`, `.ll`, `.refs` and `.eq` (a list of objects of the same form as the returned
terminfo object. However, if `allow_multiple_terms` is given, multiple comma-separated names can be given in `term`,
and the return value is a list of objects of the form described just above.
]=]
local function parse_term_with_annotations(term, paramname, deflang, allow_explicit_lang, allow_multiple_terms)
	local param_mods = require(parameter_utilities_module).construct_param_mods {
		{group = {"link", "l", "q", "ref"}},
		-- <eq:...> is parsed recursively: its value is itself a comma-separated
		-- list of annotated terms, defaulting to English and with no explicit
		-- language prefix allowed.
		{param = "eq", convert = function(eqval, parse_err)
			return parse_term_with_annotations(eqval, paramname .. ".eq", enlang, false, "allow multiple terms")
		end},
	}
	-- Build the base terminfo object for one raw term, optionally peeling off
	-- a leading "LANGCODE:" prefix when allow_explicit_lang is set.
	local function generate_obj(term, parse_err)
		local termlang
		if allow_explicit_lang then
			local actual_term
			actual_term, termlang = require(parse_interface_module).parse_term_with_lang {
				term = term,
				parse_err = parse_err,
				paramname = paramname,
			}
			term = actual_term or term
		end
		return {
			term = term,
			lang = termlang or deflang,
		}
	end
	return require(parse_interface_module).parse_inline_modifiers(term, {
		param_mods = param_mods,
		paramname = paramname,
		generate_obj = generate_obj,
		-- Only split on commas when multiple terms are permitted.
		splitchar = allow_multiple_terms and "," or nil,
	})
end
--[=[
Link a single term. If `do_language_link` is given and a given term's language is English, the link will be constructed
using language_link() in [[Module:links]]; otherwise, with full_link(). `termobj` is an object as returned by
parse_term_with_annotations(), i.e. it is suitable for passing to [[Module:links]] and additionally contains optional
fields `.q`, `.qq`, `.l`, `.ll`, `.refs` and `.eq` (a list of objects of the same form as `termobj`).
]=]
local function link_one_term(termobj, do_language_link)
	local link
	if do_language_link and termobj.lang:getCode() == "en" then
		link = m_links.language_link(termobj)
	else
		link = m_links.full_link(termobj)
	end
	-- Wrap the link in qualifiers/labels/references only if at least one such
	-- list is non-empty.
	if termobj.q and termobj.q[1] or termobj.qq and termobj.qq[1] or
		termobj.l and termobj.l[1] or termobj.ll and termobj.ll[1] or termobj.refs and termobj.refs[1] then
		link = require(pron_qualifier_module).format_qualifiers {
			lang = termobj.lang,
			text = link,
			q = termobj.q,
			qq = termobj.qq,
			l = termobj.l,
			ll = termobj.ll,
			refs = termobj.refs,
		}
	end
	-- Append English equivalents as " [=EQ1, EQ2 or EQ3]", linking each
	-- equivalent recursively (always as a language link for English terms).
	if termobj.eq then
		local eqtext = {}
		for _, eqobj in ipairs(termobj.eq) do
			table.insert(eqtext, link_one_term(eqobj, true))
		end
		link = link .. " [=" .. m_table.serialCommaJoin(eqtext, {conj = "or"}) .. "]"
	end
	return link
end
--[=[
Link each term object in `terms` (as returned by parse_term_with_annotations()) and join the links with the
conjunction `conj` (default "or"). Joining normally goes through serialCommaJoin() in [[Module:table]], which inserts
CSS spans before the final conjunction so an "Oxford comma" can be shown when configured; as a special case, if `conj`
is exactly ", " the links are concatenated directly with that separator. If `include_langname` is given, the canonical
name of the first term's language is prepended to the result. `do_language_link` is forwarded to link_one_term() so
English terms are rendered with language_link() rather than full_link().
]=]
local function join_terms(terms, include_langname, do_language_link, conj)
	local links = {}
	local langnametext
	for i = 1, #terms do
		local termobj = terms[i]
		if include_langname and langnametext == nil then
			langnametext = termobj.lang:getCanonicalName() .. " "
		end
		links[#links + 1] = link_one_term(termobj, do_language_link)
	end
	local joined
	if conj == ", " then
		joined = table.concat(links, conj)
	else
		joined = m_table.serialCommaJoin(links, {conj = conj or "or"})
	end
	return (langnametext or "") .. joined
end
--[=[
Gather the parameters for multiple names and link each name using full_link() (for foreign names) or language_link()
(for English names), joining the names using serialCommaJoin() in [[Module:table]] with the conjunction `conj`
(defaulting to "or"). (However, if `conj` is the special value ", ", joining is done directly using that value.)
This can be used, for example, to fetch and join all the masculine equivalent names for a feminine given name. Each
name is specified using parameters beginning with `pname` in `args`, e.g. "m", "m2", "m3", etc. `lang` is a language
object specifying the language of the names (defaulting to English), for use in linking them. If `allow_explicit_lang`
is given, the language of the terms can be specified explicitly by prefixing a term with a language code, e.g.
'sv:Björn' or 'la:[[Nicolaus|Nīcolāī]]'. This function assumes that the parameters have already been parsed by
[[Module:parameters]] and gathered into lists, so that e.g. all "mN" parameters are in a list in args["m"].
Returns two values: the joined text (the empty string when no names were given) and the number of names.
]=]
local function join_names(lang, args, pname, conj, allow_explicit_lang)
	local termobjs = {}
	local do_language_link = false
	-- No language means the names are English and should use language_link().
	if not lang then
		lang = enlang
		do_language_link = true
	end
	-- Parse one raw parameter value (which may itself contain several
	-- comma-separated names) and accumulate the resulting term objects.
	-- The error-message parameter name follows template convention: "m" for
	-- the first, "m2"/"m3"/... for the rest.
	local function process_one_term(term, i)
		for _, termobj in ipairs(parse_term_with_annotations(term, pname .. (i == 1 and "" or i), lang,
			allow_explicit_lang, "allow multiple terms")) do
			table.insert(termobjs, termobj)
		end
	end
	-- args[pname] may be absent, a list (normal [[Module:parameters]] output)
	-- or a single string.
	if not args[pname] then
		return "", 0
	elseif type(args[pname]) == "table" then
		for i, term in ipairs(args[pname]) do
			process_one_term(term, i)
		end
	else
		process_one_term(args[pname], 1)
	end
	return join_terms(termobjs, nil, do_language_link, conj), #termobjs
end
-- Build the "equivalent to ..." text from the eq/eq2/... parameters in `args`.
-- Equivalents default to English but may carry an explicit language prefix;
-- consecutive equivalents in the same language are grouped into one segment so
-- the language name is mentioned only once per run, and the segments are then
-- serial-comma-joined with "or".
local function get_eqtext(args)
	local eqsegs = {}
	local lastlang = nil
	local last_eqseg = {}
	-- Parse one raw eqN value and distribute its terms into language runs:
	-- whenever the language changes, the current run is flushed into `eqsegs`.
	local function process_one_term(term, i)
		for _, termobj in ipairs(parse_term_with_annotations(term, "eq" .. (i == 1 and "" or i), enlang,
			"allow explicit lang", "allow multiple terms")) do
			local termlang = termobj.lang:getCode()
			if lastlang and lastlang ~= termlang then
				if #last_eqseg > 0 then
					table.insert(eqsegs, last_eqseg)
				end
				last_eqseg = {}
			end
			lastlang = termlang
			table.insert(last_eqseg, termobj)
		end
	end
	-- args.eq may be a list, a single string, or absent.
	if type(args.eq) == "table" then
		for i, term in ipairs(args.eq) do
			process_one_term(term, i)
		end
	elseif type(args.eq) == "string" then
		process_one_term(args.eq, 1)
	end
	-- Flush the trailing run.
	if #last_eqseg > 0 then
		table.insert(eqsegs, last_eqseg)
	end
	local eqtextsegs = {}
	for _, eqseg in ipairs(eqsegs) do
		table.insert(eqtextsegs, join_terms(eqseg, "include langname"))
	end
	return m_table.serialCommaJoin(eqtextsegs, {conj = "or"})
end
-- Build the "from ..." etymological text from the from/from2/... parameters in
-- `args`, for a name in language `lang`. Returns two values: the formatted
-- text and `catparts`, a list of category name fragments (e.g. source language
-- names) used by the callers to build categories like
-- "LANG male given names from CATPART".
local function get_fromtext(lang, args)
	local catparts = {}
	local fromsegs = {}
	local i = 1
	-- Interpret a single origin specification and return a prefix/suffix pair
	-- of display text. Recognized fixed keywords ("surnames", "patronymics",
	-- "the Bible", ...) get special wording; "LANGCODE:term" values link the
	-- source term; "X languages"-style values are treated as families;
	-- anything else is tried as a (possibly etymology-only) language name.
	local function parse_from(from)
		local unrecognized = false
		local prefix, suffix
		if from == "surnames" or from == "given names" or from == "nicknames" or from == "place names" or from == "common nouns" or from == "month names" then
			prefix = "transferred from the "
			suffix = from:gsub("s$", "")
			table.insert(catparts, from)
		elseif from == "patronymics" or from == "matronymics" or from == "coinages" then
			prefix = "originating "
			suffix = "as a " .. from:gsub("s$", "")
			table.insert(catparts, from)
		elseif from == "occupations" or from == "ethnonyms" then
			prefix = "originating "
			suffix = "as an " .. from:gsub("s$", "")
			table.insert(catparts, from)
		elseif from == "the Bible" then
			prefix = "originating "
			suffix = "from the Bible"
			table.insert(catparts, from)
		else
			prefix = "from "
			if from:find(":") then
				local termobj = parse_term_with_annotations(from, "from" .. (i == 1 and "" or i), lang,
					"allow explicit lang")
				local fromlangname = ""
				if termobj.lang:getCode() ~= lang:getCode() then
					-- If name is derived from another name in the same language, don't include lang name after text
					-- "from " or create a category like "German male given names derived from German".
					local canonical_name = termobj.lang:getCanonicalName()
					fromlangname = canonical_name .. " "
					table.insert(catparts, canonical_name)
				end
				suffix = fromlangname .. link_one_term(termobj)
			else
				-- NOTE(review): only the first pattern has a capture; for
				-- "... Languages" and "... [Ll]ects" the whole string (not the
				-- family name alone) is passed to getByCanonicalName() —
				-- confirm this is intended.
				local family = from:match("^(.+) languages$") or
					from:match("^.+ Languages$") or
					from:match("^.+ [Ll]ects$")
				if family then
					if require("Module:families").getByCanonicalName(family) then
						table.insert(catparts, from)
					else
						unrecognized = true
					end
					suffix = "the " .. from
				else
					if m_languages.getByCanonicalName(from, nil, "allow etym") then
						table.insert(catparts, from)
					else
						unrecognized = true
					end
					suffix = from
				end
			end
		end
		if unrecognized then
			track("unrecognized from")
			track("unrecognized from/" .. from)
		end
		return prefix, suffix
	end
	local last_fromseg = nil
	local put = require(parse_utilities_module)
	local from_args = args.from or {}
	if type(from_args) == "string" then
		from_args = {from_args}
	end
	while from_args[i] do
		-- We may have multiple comma-separated items, each of which may have multiple items separated by a
		-- space-delimited < sign, each of which may have inline modifiers with embedded commas in them. To handle
		-- this correctly, first replace space-delimited < signs with a special character, then split on balanced
		-- <...> and [...] signs, then split on comma, then rejoin the stuff between commas. We will then split on
		-- TEMP_LESS_THAN (the replacement for space-delimited < signs) and reparse.
		local rawfroms = rsub(from_args[i], "%s+<%s+", TEMP_LESS_THAN)
		local segments = put.parse_multi_delimiter_balanced_segment_run(rawfroms, {{"<", ">"}, {"[", "]"}})
		local comma_separated_groups = put.split_alternating_runs_on_comma(segments)
		for j, comma_separated_group in ipairs(comma_separated_groups) do
			comma_separated_groups[j] = table.concat(comma_separated_group)
		end
		for _, rawfrom in ipairs(comma_separated_groups) do
			local froms = rsplit(rawfrom, TEMP_LESS_THAN)
			if #froms == 1 then
				-- Single origin: merge into the current segment when the
				-- prefix matches, otherwise start a new segment.
				local prefix, suffix = parse_from(froms[1])
				if last_fromseg and (last_fromseg.has_multiple_froms or last_fromseg.prefix ~= prefix) then
					table.insert(fromsegs, last_fromseg)
					last_fromseg = nil
				end
				if not last_fromseg then
					last_fromseg = {prefix = prefix, suffixes = {}}
				end
				table.insert(last_fromseg.suffixes, suffix)
			else
				-- Derivation chain "A < B < C": render as
				-- "A [in turn B, in turn C]" in its own segment.
				if last_fromseg then
					table.insert(fromsegs, last_fromseg)
					last_fromseg = nil
				end
				local first_suffixpart = ""
				local rest_suffixparts = {}
				for j, from in ipairs(froms) do
					local prefix, suffix = parse_from(from)
					if j == 1 then
						first_suffixpart = prefix .. suffix
					else
						table.insert(rest_suffixparts, prefix .. suffix)
					end
				end
				local full_suffix = first_suffixpart .. " [in turn " .. table.concat(rest_suffixparts, ", in turn ") .. "]"
				last_fromseg = {prefix = "", has_multiple_froms = true, suffixes = {full_suffix}}
			end
		end
		i = i + 1
	end
	-- NOTE(review): last_fromseg is nil when no from= was given; the callers
	-- only invoke this function when args.from is set, so a segment should
	-- always exist here — confirm.
	table.insert(fromsegs, last_fromseg)
	local fromtextsegs = {}
	for _, fromseg in ipairs(fromsegs) do
		table.insert(fromtextsegs, fromseg.prefix .. m_table.serialCommaJoin(fromseg.suffixes, {conj = "or"}))
	end
	return m_table.serialCommaJoin(fromtextsegs, {conj = "or"}), catparts
end
-- Parse the gender= parameter of {{given name}}. `genderspec` is either a bare
-- gender keyword (fast path) or a comma-separated list of genders, each with
-- optional inline modifiers (<q:...>, <l:...>, <ref:...>, <text:...>,
-- <article:...>). Returns two values: a list of {type=GENDER, props=PROPS, ...}
-- objects and a flag indicating whether the genders are animal genders.
-- Mixing human and animal genders is an error.
local function parse_given_name_genders(genderspec)
	if export.given_name_genders[genderspec] then -- optimization
		return {{
			type = genderspec,
			props = export.given_name_genders[genderspec],
		}}, export.given_name_genders[genderspec].type == "animal"
	end
	local genders = {}
	local is_animal = nil
	local param_mods = require(parameter_utilities_module).construct_param_mods {
		{group = {"l", "q", "ref"}},
		{param = {"text", "article"}},
	}
	-- Validate one gender keyword, listing the valid genders (sorted, for a
	-- deterministic message) when it is unrecognized.
	local function generate_obj(term, parse_err)
		if not export.given_name_genders[term] then
			local valid_genders = {}
			for k, _ in pairs(export.given_name_genders) do
				table.insert(valid_genders, k)
			end
			table.sort(valid_genders)
			parse_err(("Unrecognized gender '%s': valid genders are %s"):format(
				term, table.concat(valid_genders, ", ")))
		end
		return {
			type = term,
			props = export.given_name_genders[term],
		}
	end
	local retval = require(parse_interface_module).parse_inline_modifiers(genderspec, {
		param_mods = param_mods,
		paramname = "2",
		generate_obj = generate_obj,
		splitchar = ",",
	})
	-- Enforce that all listed genders agree on human vs. animal.
	for _, spec in ipairs(retval) do
		local this_is_animal = spec.props.type == "animal"
		if is_animal == nil then
			is_animal = this_is_animal
		elseif is_animal ~= this_is_animal then
			error("Can't mix animal and human genders")
		end
	end
	return retval, is_animal
end
-- Format the parsed gender specs from parse_given_name_genders() into display
-- text. Returns two values: the serial-comma-joined gender text and the
-- indefinite article to use before it ("a"/"an", possibly overridden).
local function generate_given_name_genders(lang, genders)
	local parts = {}
	for _, spec in ipairs(genders) do
		local text
		if spec.text then
			-- <text:...> replaces the gender display text; "+" in it stands
			-- for the gender keyword itself.
			-- NOTE: This assumes no % sign in the gender type, which seems safe.
			text = spec.text:gsub("%+", spec.type)
		else
			-- Animal genders are linked to the animal's entry; human genders
			-- are plain text.
			if spec.props.type == "animal" then
				text = "[[" .. spec.type .. "]]"
			else
				text = spec.type
			end
		end
		-- Attach qualifiers/labels/references when any are present.
		if spec.q and spec.q[1] or spec.qq and spec.qq[1] or spec.l and spec.l[1] or spec.ll and spec.ll[1] or
			spec.refs and spec.refs[1] then
			text = require(pron_qualifier_module).format_qualifiers {
				lang = lang,
				text = text,
				q = spec.q,
				qq = spec.qq,
				l = spec.l,
				ll = spec.ll,
				refs = spec.refs,
				raw = true,
			}
		end
		table.insert(parts, text)
	end
	local retval = m_table.serialCommaJoin(parts, {conj = "or"})
	-- Article precedence: explicit <article:...> on the first gender, then the
	-- gender's default article (only when the first gender is displayed
	-- unmodified), then the article computed from the raw display text.
	local article = genders[1].article
	if not article and not genders[1].text and not genders[1].q and not genders[1].l then
		article = genders[1].props.article
	end
	if not article then
		article = require(en_utilities_module).get_indefinite_article(get_rawtext(retval))
	end
	return retval, article
end
-- The entry point for {{given name}}. Parses the template arguments, assembles
-- the definition text ("a male given name from Latin, equivalent to ..."),
-- and appends the appropriate categories.
function export.given_name(frame)
	local parent_args = frame:getParent().args
	local compat = parent_args.lang
	local offset = compat and 0 or 1
	local lang_index = compat and "lang" or 1
	local list = {list = true}
	local args = require("Module:parameters").process(parent_args, {
		[lang_index] = {required = true, type = "language", default = "und"},
		["gender"] = {default = "unknown-gender"},
		[1 + offset] = {alias_of = "gender"},
		["usage"] = true,
		["origin"] = true,
		["popular"] = true,
		["populartype"] = true,
		["meaning"] = list,
		["meaningtype"] = true,
		["addl"] = true,
		-- initial article: A or An
		["A"] = true,
		["sort"] = true,
		["from"] = true,
		[2 + offset] = {alias_of = "from"},
		["fromtype"] = true,
		["xlit"] = true,
		["eq"] = true,
		["eqtype"] = true,
		["varof"] = true,
		["varoftype"] = true,
		["var"] = {alias_of = "varof"},
		["vartype"] = {alias_of = "varoftype"},
		["varform"] = true,
		["varformtype"] = true,
		["dimof"] = true,
		["dimoftype"] = true,
		["dim"] = {alias_of = "dimof"},
		["dimtype"] = {alias_of = "dimoftype"},
		["dimform"] = true,
		["dimformtype"] = true,
		["augof"] = true,
		["augoftype"] = true,
		["aug"] = {alias_of = "augof"},
		["augtype"] = {alias_of = "augoftype"},
		["augform"] = true,
		["augformtype"] = true,
		["clipof"] = true,
		["clipoftype"] = true,
		["blend"] = true,
		["blendtype"] = true,
		["m"] = true,
		["mtype"] = true,
		["f"] = true,
		["ftype"] = true,
	})
	local textsegs = {}
	local lang = args[lang_index]
	local langcode = lang:getCode()
	-- Return "TYPETEXT " for a *type parameter, or "" when it is unset.
	local function fetch_typetext(param)
		return args[param] and args[param] .. " " or ""
	end
	local genders, is_animal = parse_given_name_genders(args.gender)
	-- Pre-render all the linked-name lists; each call also returns the count.
	local dimoftext, numdimofs = join_names(lang, args, "dimof")
	local augoftext, numaugofs = join_names(lang, args, "augof")
	local xlittext = join_names(nil, args, "xlit")
	local blendtext = join_names(lang, args, "blend", "and")
	local varoftext = join_names(lang, args, "varof")
	local clipoftext = join_names(lang, args, "clipof")
	local mtext = join_names(lang, args, "m")
	local ftext = join_names(lang, args, "f")
	local varformtext, numvarforms = join_names(lang, args, "varform", ", ")
	local dimformtext, numdimforms = join_names(lang, args, "dimform", ", ")
	local augformtext, numaugforms = join_names(lang, args, "augform", ", ")
	local meaningsegs = {}
	for _, meaning in ipairs(args.meaning) do
		table.insert(meaningsegs, '“' .. meaning .. '”')
	end
	local meaningtext = m_table.serialCommaJoin(meaningsegs, {conj = "or"})
	local eqtext = get_eqtext(args)
	local function ins(txt)
		table.insert(textsegs, txt)
	end
	local dimoftype = args.dimoftype
	local augoftype = args.augoftype
	-- FIX: `added_text` and `force_plural` were accidental globals (no
	-- `local`), polluting the module environment; declare them locally.
	local added_text = nil
	if numdimofs > 0 then
		added_text = (dimoftype and dimoftype .. " " or "") .. "[[diminutive]]" ..
			(xlittext ~= "" and ", " .. xlittext .. "," or "") .. " of "
	elseif numaugofs > 0 then
		added_text = (augoftype and augoftype .. " " or "") .. "[[augmentative]]" ..
			(xlittext ~= "" and ", " .. xlittext .. "," or "") .. " of "
	end
	local force_plural = false
	if added_text ~= nil then
		-- dimof=- means "diminutive of unspecified names": suppress the name
		-- list and pluralize "given names".
		if args.dimof == "-" then
			dimoftext = ""
			force_plural = true
		else
			added_text = added_text .. "the "
		end
		ins(added_text)
	end
	local article = args.A
	if not article and textsegs[1] then
		article = require(en_utilities_module).get_indefinite_article(textsegs[1])
	end
	-- For human names the gender text precedes "given name"; for animal names
	-- it is appended later ("... for a dog").
	if not is_animal then
		local gendertext, gender_article = generate_given_name_genders(lang, genders)
		article = article or gender_article
		ins(gendertext)
		ins(" ")
	end
	ins((numdimofs > 1 or numaugofs > 1 or force_plural) and "[[given name|given names]]" or "[[given name]]")
	article = article or "a" -- if no article set yet, it's "a" based on "given name"
	if langcode == "en" then
		article = mw.getContentLanguage():ucfirst(article)
	end
	local need_comma = false
	if numdimofs > 0 then
		ins(" " .. dimoftext)
		need_comma = not is_animal
	elseif numaugofs > 0 then
		ins(" " .. augoftext)
		need_comma = not is_animal
	elseif xlittext ~= "" then
		ins(", " .. xlittext)
		need_comma = true
	end
	if is_animal then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" for ")
		local gendertext, gender_article = generate_given_name_genders(lang, genders)
		ins(gender_article)
		ins(" ")
		ins(gendertext)
	end
	local from_catparts = {}
	if args.from then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("fromtype"))
		local textseg, this_catparts = get_fromtext(lang, args)
		for _, catpart in ipairs(this_catparts) do
			m_table.insertIfNot(from_catparts, catpart)
		end
		ins(textseg)
	end
	if meaningtext ~= "" then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("meaningtype") .. "meaning " .. meaningtext)
	end
	if args.origin then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.origin .. " origin")
	end
	if args.usage then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.usage .. " usage")
	end
	if varoftext ~= "" then
		ins(", " .. fetch_typetext("varoftype") .. "variant of " .. varoftext)
	end
	if clipoftext ~= "" then
		ins(", " .. fetch_typetext("clipoftype") .. "clipping of " .. clipoftext)
	end
	if blendtext ~= "" then
		ins(", " .. fetch_typetext("blendtype") .. "blend of " .. blendtext)
	end
	if args.popular then
		ins(", " .. fetch_typetext("populartype") .. "popular " .. args.popular)
	end
	if mtext ~= "" then
		ins(", " .. fetch_typetext("mtype") .. "masculine equivalent " .. mtext)
	end
	if ftext ~= "" then
		ins(", " .. fetch_typetext("ftype") .. "feminine equivalent " .. ftext)
	end
	if eqtext ~= "" then
		ins(", " .. fetch_typetext("eqtype") .. "equivalent to " .. eqtext)
	end
	-- addl= text: leading ";" attaches verbatim, leading "_" attaches with a
	-- space, anything else with ", ".
	if args.addl then
		if args.addl:find("^;") then
			ins(args.addl)
		elseif args.addl:find("^_") then
			ins(" " .. args.addl:sub(2))
		else
			ins(", " .. args.addl)
		end
	end
	if varformtext ~= "" then
		ins("; " .. fetch_typetext("varformtype") .. "variant form" .. (numvarforms > 1 and "s" or "") .. " " ..
			varformtext)
	end
	if dimformtext ~= "" then
		ins("; " .. fetch_typetext("dimformtype") .. "diminutive form" .. (numdimforms > 1 and "s" or "") .. " " ..
			dimformtext)
	end
	if augformtext ~= "" then
		ins("; " .. fetch_typetext("augformtype") .. "augmentative form" .. (numaugforms > 1 and "s" or "") .. " " ..
			augformtext)
	end
	textsegs = "<span class='use-with-mention'>" .. article .. " " .. table.concat(textsegs) .. "</span>"
	local categories = {}
	local langname = lang:getCanonicalName() .. " "
	-- Insert the gender-derived categories (and their "from CATPART"
	-- variants), optionally prefixed by "diminutives of "/"augmentatives of ".
	local function insert_cats(dimaugof)
		if dimaugof == "" and genders[1].props.type == "human" then
			-- No category such as "English diminutives of given names"
			table.insert(categories, langname .. "given names")
		end
		local function insert_cat(cat)
			table.insert(categories, langname .. dimaugof .. cat)
			for _, catpart in ipairs(from_catparts) do
				table.insert(categories, langname .. dimaugof .. cat .. " from " .. catpart)
			end
		end
		for _, spec in ipairs(genders) do
			local typ = spec.type
			if spec.props.track then
				track(typ)
			end
			local cats = get_given_name_cats(spec.type, spec.props)
			for _, cat in ipairs(cats) do
				insert_cat(cat)
			end
		end
	end
	insert_cats("")
	if numdimofs > 0 then
		insert_cats("diminutives of ")
	elseif numaugofs > 0 then
		insert_cats("augmentatives of ")
	end
	return textsegs .. m_utilities.format_categories(categories, lang, args.sort, nil, force_cat)
end
-- The entry point for {{surname}}, {{patronymic}} and {{matronymic}}.
-- frame.args.type selects which of the three templates is being rendered;
-- parses the template arguments, assembles the definition text and appends the
-- appropriate categories (unless nocat= is given).
function export.surname(frame)
	local iargs = require("Module:parameters").process(frame.args, {
		["type"] = {required = true, set = {"surname", "patronymic", "matronymic"}},
	})
	local parent_args = frame:getParent().args
	local compat = parent_args.lang
	local offset = compat and 0 or 1
	if parent_args.dot or parent_args.nodot then
		error("dot= and nodot= are no longer supported in [[Template:" .. iargs.type .. "]] because a trailing " ..
			"period is no longer added by default; if you want it, add it explicitly after the template")
	end
	local lang_index = compat and "lang" or 1
	local list = {list = true}
	-- {{surname}} takes gender in g= and the adjective as the first positional
	-- argument; {{patronymic}}/{{matronymic}} take gender positionally.
	local gender_arg = iargs.type == "surname" and "g" or 1 + offset
	local adj_arg = iargs.type == "surname" and 1 + offset or 2 + offset
	local args = require("Module:parameters").process(parent_args, {
		[lang_index] = {required = true, type = "language", template_default = "und"},
		[gender_arg] = iargs.type == "surname" and true or {required = true, template_default = "unknown"}, -- gender(s)
		[adj_arg] = true, -- adjective/qualifier
		["usage"] = true,
		["origin"] = true,
		["popular"] = true,
		["populartype"] = true,
		["meaning"] = list,
		["meaningtype"] = true,
		["parent"] = true,
		["addl"] = true,
		-- initial article: by default A or An (English), a or an (otherwise)
		["A"] = true,
		["sort"] = true,
		["from"] = true,
		["fromtype"] = true,
		["xlit"] = true,
		["eq"] = true,
		["eqtype"] = true,
		["varof"] = true,
		["varoftype"] = true,
		["var"] = {alias_of = "varof"},
		["vartype"] = {alias_of = "varoftype"},
		["varform"] = true,
		["varformtype"] = true,
		["clipof"] = true,
		["clipoftype"] = true,
		["blend"] = true,
		["blendtype"] = true,
		["m"] = true,
		["mtype"] = true,
		["f"] = true,
		["ftype"] = true,
		["nocat"] = {type = "boolean"},
	})
	local textsegs = {}
	local lang = args[lang_index]
	local langcode = lang:getCode()
	-- Return "TYPETEXT " for a *type parameter, or "" when it is unset.
	local function fetch_typetext(param)
		return args[param] and args[param] .. " " or ""
	end
	-- Normalize the comma-separated gender list, remembering whether male
	-- and/or female were seen (used for "son/daughter of" wording below).
	local saw_male = false
	local saw_female = false
	local genders = {}
	if args[gender_arg] then
		for _, g in ipairs(require(parse_interface_module).split_on_comma(args[gender_arg])) do
			if g == "unknown" or g == "unknown gender" or g == "unknown-gender" or g == "?" then
				g = "unknown-gender"
				track("unknown gender")
			elseif g == "unisex" or g == "common gender" or g == "common-gender" or g == "c" then
				g = "common-gender"
				saw_male = true
				saw_female = true
			elseif g == "m" or g == "male" then
				g = "male"
				saw_male = true
			elseif g == "f" or g == "female" then
				g = "female"
				saw_female = true
			else
				error("Unrecognized gender: " .. g)
			end
			table.insert(genders, g)
		end
	end
	local adj = args[adj_arg]
	-- Pre-render all the linked-name lists.
	local xlittext = join_names(nil, args, "xlit")
	local blendtext = join_names(lang, args, "blend", "and")
	local varoftext = join_names(lang, args, "varof")
	local clipoftext = join_names(lang, args, "clipof")
	local mtext = join_names(lang, args, "m")
	local ftext = join_names(lang, args, "f")
	local parenttext = join_names(lang, args, "parent", nil, "allow explicit lang")
	local varformtext, numvarforms = join_names(lang, args, "varform", ", ")
	local meaningsegs = {}
	for _, meaning in ipairs(args.meaning) do
		table.insert(meaningsegs, '“' .. meaning .. '”')
	end
	-- parent= renders as a meaning “son/daughter of PARENT”, gendered
	-- according to the genders seen above.
	if parenttext ~= "" then
		local child = saw_male and not saw_female and "son" or saw_female and not saw_male and "daughter" or
			"son/daughter"
		table.insert(meaningsegs, ("“%s of %s”"):format(child, parenttext))
	end
	local meaningtext = m_table.serialCommaJoin(meaningsegs, {conj = "or"})
	local eqtext = get_eqtext(args)
	local function ins(txt)
		table.insert(textsegs, txt)
	end
	ins("<span class='use-with-mention'>")
	-- If gender is supplied, it goes before the specified adjective in adj=. The only value of gender that uses "an" is
	-- "unknown-gender" (note that "unisex" wouldn't use it but in any case we map "unisex" to "common-gender"). If gender
	-- isn't supplied, look at the first letter of the value of adj= if supplied; otherwise, the article is always "a"
	-- because the word "surname", "patronymic" or "matronymic" follows. Capitalize "A"/"An" if English.
	local article
	if args.A then
		article = args.A
	else
		article = #genders > 0 and genders[1] == "unknown-gender" and "an" or
			#genders == 0 and adj and require(en_utilities_module).get_indefinite_article(adj) or
			"a"
		if langcode == "en" then
			article = mw.getContentLanguage():ucfirst(article)
		end
	end
	ins(article .. " ")
	if #genders > 0 then
		ins(table.concat(genders, " or ") .. " ")
	end
	if adj then
		ins(adj .. " ")
	end
	ins("[[" .. iargs.type .. "]]")
	-- Append the optional clauses; need_comma tracks whether a separating
	-- comma is required before the next clause.
	local need_comma = false
	if xlittext ~= "" then
		ins(", " .. xlittext)
		need_comma = true
	end
	local from_catparts = {}
	if args.from then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("fromtype"))
		local textseg, this_catparts = get_fromtext(lang, args)
		for _, catpart in ipairs(this_catparts) do
			m_table.insertIfNot(from_catparts, catpart)
		end
		ins(textseg)
	end
	if meaningtext ~= "" then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" " .. fetch_typetext("meaningtype") .. "meaning " .. meaningtext)
	end
	if args.origin then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.origin .. " origin")
	end
	if args.usage then
		if need_comma then
			ins(",")
		end
		need_comma = true
		ins(" of " .. args.usage .. " usage")
	end
	if varoftext ~= "" then
		ins(", " ..fetch_typetext("varoftype") .. "variant of " .. varoftext)
	end
	if clipoftext ~= "" then
		ins(", " .. fetch_typetext("clipoftype") .. "clipping of " .. clipoftext)
	end
	if blendtext ~= "" then
		ins(", " .. fetch_typetext("blendtype") .. "blend of " .. blendtext)
	end
	if args.popular then
		ins(", " .. fetch_typetext("populartype") .. "popular " .. args.popular)
	end
	if mtext ~= "" then
		ins(", " .. fetch_typetext("mtype") .. "masculine equivalent " .. mtext)
	end
	if ftext ~= "" then
		ins(", " .. fetch_typetext("ftype") .. "feminine equivalent " .. ftext)
	end
	if eqtext ~= "" then
		ins(", " .. fetch_typetext("eqtype") .. "equivalent to " .. eqtext)
	end
	-- addl= text: leading ";" attaches verbatim, leading "_" attaches with a
	-- space, anything else with ", ".
	if args.addl then
		if args.addl:find("^;") then
			ins(args.addl)
		elseif args.addl:find("^_") then
			ins(" " .. args.addl:sub(2))
		else
			ins(", " .. args.addl)
		end
	end
	if varformtext ~= "" then
		ins("; " .. fetch_typetext("varformtype") .. "variant form" ..
			(numvarforms > 1 and "s" or "") .. " " .. varformtext)
	end
	ins("</span>")
	local text = table.concat(textsegs, "")
	if args.nocat then
		return text
	end
	-- Build categories: a base "LANG TYPEs" category (plus "from CATPART"
	-- variants), then one per gender; "common-gender" also adds the male and
	-- female categories, "unknown-gender" adds none.
	local categories = {}
	local langname = lang:getCanonicalName() .. " "
	local function insert_cats(g)
		g = g and g .. " " or ""
		table.insert(categories, langname .. g .. iargs.type .. "s")
		for _, catpart in ipairs(from_catparts) do
			table.insert(categories, langname .. g .. iargs.type .. "s from " .. catpart)
		end
	end
	insert_cats(nil)
	local function insert_cats_gender(g)
		if g == "unknown-gender" then
			return
		end
		if g == "common-gender" then
			insert_cats_gender("male")
			insert_cats_gender("female")
		end
		insert_cats(g)
	end
	for _, g in ipairs(genders) do
		insert_cats_gender(g)
	end
	return text .. m_utilities.format_categories(categories, lang, args.sort, nil, force_cat)
end
-- The entry point for {{name translit}}, {{name respelling}}, {{name obor}} and {{foreign name}}.
function export.name_translit(frame)
-- Entry point for {{name translit}}, {{name respelling}}, {{name obor}} and {{foreign name}}.
-- Invocation args: `desctext` is the leading description text shown to the reader;
-- `obor` and `foreign_name` flag the {{name obor}}/{{foreign name}} variants, which
-- change the linking and categorization logic below.
-- Returns the formatted description text plus category links.
local boolean = {type = "boolean"}
local iargs = require("Module:parameters").process(frame.args, {
["desctext"] = {required = true},
["obor"] = boolean,
["foreign_name"] = boolean,
})
local parent_args = frame:getParent().args
-- Template params: 1 = destination language, 2 = source language(s) (comma-separated
-- sublist), 3 = name(s) in the first source language (with inline modifiers).
local params = {
[1] = {required = true, type = "language", template_default = "en"},
[2] = {required = true, type = "language", sublist = true, template_default = "ru"},
[3] = {list = true, allow_holes = true},
["type"] = {required = true, set = translit_name_type_list, sublist = true, default = "patronymic"},
["dim"] = boolean,
["aug"] = boolean,
["nocap"] = boolean,
["addl"] = true,
["sort"] = true,
["pagename"] = true,
}
local m_param_utils = require(parameter_utilities_module)
-- Inline modifiers allowed on each name in param 3: link/qualifier/label/reference
-- groups plus <xlit:...> and <eq:...>.
local param_mods = m_param_utils.construct_param_mods {
{group = {"link", "q", "l", "ref"}},
{param = {"xlit", "eq"}},
}
local names, args = m_param_utils.parse_list_with_inline_modifiers_and_separate_params {
params = params,
param_mods = param_mods,
raw_args = parent_args,
termarg = 3,
track_module = "names/name translit",
disallow_custom_separators = true,
-- Use the first source language as the language of the specified names.
lang = function(args) return args[2][1] end,
sc = "sc.default",
}
local lang = args[1]
local langcode = lang:getCode()
local sources = args[2]
local pagename = args.pagename or mw.loadData("Module:headword/data").pagename
-- Output is accumulated in `textsegs` via ins() and concatenated at the end.
local textsegs = {}
local function ins(txt)
table.insert(textsegs, txt)
end
ins("<span class='use-with-mention'>")
local desctext = iargs.desctext
-- Capitalize the leading description for English entries unless |nocap=1.
if langcode == "en" and not args.nocap then
desctext = mw.getContentLanguage():ucfirst(desctext)
end
ins(desctext .. " ")
if not iargs.foreign_name then
ins("of ")
end
-- Build the source-language portion, e.g. "the Russian" / "a Russian" / a
-- serial-comma-joined list of source languages.
local langsegs = {}
for i, source in ipairs(sources) do
local sourcename = source:getCanonicalName()
local function get_source_link()
local term_to_link = names[1] and names[1].term or pagename
-- We link the language name to either the first specified name or the pagename, in the following
-- circumstances:
-- (1) More than one language was given along with at least one name; or
-- (2) We're handling {{foreign name}} or {{name obor}}, and no name was given.
-- The reason for (1) is that if more than one language was given, we want a link to the name
-- in each language, as the name that's displayed is linked only to the first specified language.
-- However, if only one language was given, linking the language to the name is redundant.
-- The reason for (2) is that {{foreign name}} is often used when the name in the destination language
-- is spelled the same as the name in the source language (e.g. [[Clinton]] or [[Obama]] in Italian),
-- and in that case no name will be explicitly specified but we still want a link to the name in the
-- source language. The reason we restrict this to {{foreign name}} or {{name obor}}, not to
-- {{name translit}} or {{name respelling}}, is that {{name translit}} and {{name respelling}} ought to be
-- used for names spelled differently in the destination language (either transliterated or respelled), so
-- assuming the pagename is the name in the source language is wrong.
if names[1] and #sources > 1 or (iargs.foreign_name or iargs.obor) and not names[1] then
return m_links.language_link{
lang = sources[i], term = term_to_link, alt = sourcename, tr = "-"
}
else
return sourcename
end
end
if i == 1 and not iargs.foreign_name then
-- If at least one name is given, we say "A transliteration of the LANG surname FOO", linking LANG to FOO.
-- Otherwise we say "A transliteration of a LANG surname".
if names[1] then
table.insert(langsegs, "the " .. get_source_link())
else
table.insert(langsegs, require(en_utilities_module).add_indefinite_article(sourcename))
end
else
table.insert(langsegs, get_source_link())
end
end
local langseg_text = m_table.serialCommaJoin(langsegs, {conj = "or"})
-- Optional " [[diminutive]]"/" [[augmentative]]" suffix after the name type.
local augdim_text
if args.dim then
augdim_text = " [[diminutive]]"
elseif args.aug then
augdim_text = " [[augmentative]]"
else
augdim_text = ""
end
-- Wikilink the well-known name types; unrecognized types pass through as-is.
local nametype_linked = {}
for _, nametype in ipairs(args["type"]) do
if nametype == "surname" or nametype == "patronymic" then
table.insert(nametype_linked, "[[" .. nametype .. "]]")
elseif nametype == "male given name" then
table.insert(nametype_linked, "male [[given name]]")
elseif nametype == "female given name" then
table.insert(nametype_linked, "female [[given name]]")
elseif nametype == "unisex given name" then
table.insert(nametype_linked, "unisex [[given name]]")
else
table.insert(nametype_linked, nametype)
end
end
local nametype_text = m_table.serialCommaJoin(nametype_linked) .. augdim_text
-- Word order differs: "... of the LANG TYPE NAME" vs ("{{foreign name}}") "... TYPE in LANG, NAME".
if not iargs.foreign_name then
ins(langseg_text .. " ")
ins(nametype_text)
if names[1] then
ins(" ")
end
else
ins(nametype_text)
ins(" in " .. langseg_text)
if names[1] then
ins(", ")
end
end
-- Format each name, wrapping with qualifiers/labels/references when any inline
-- modifier was given, and appending <xlit:...> / <eq:...> renderings.
local linked_names = {}
local embedded_comma = false
for _, name in ipairs(names) do
local linked_name = m_links.full_link(name, "term")
if name.q and name.q[1] or name.qq and name.qq[1] or name.l and name.l[1] or name.ll and name.ll[1] or
name.refs and name.refs[1] then
linked_name = require(pron_qualifier_module).format_qualifiers {
lang = name.lang,
text = linked_name,
q = name.q,
qq = name.qq,
l = name.l,
ll = name.ll,
refs = name.refs,
raw = true,
}
end
if name.xlit then
embedded_comma = true
linked_name = linked_name .. ", " .. m_links.language_link { lang = enlang, term = name.xlit }
end
if name.eq then
embedded_comma = true
linked_name = linked_name .. ", equivalent to " .. m_links.language_link { lang = enlang, term = name.eq }
end
table.insert(linked_names, linked_name)
end
-- If any name already contains an embedded comma (from xlit/eq), join with
-- "; or of " to avoid ambiguity; otherwise serial-comma-join with "or".
if embedded_comma then
ins(table.concat(linked_names, "; or of "))
else
ins(m_table.serialCommaJoin(linked_names, {conj = "or"}))
end
-- |addl= free text: leading ";" appends verbatim, leading "_" appends with a
-- space (dropping the underscore), otherwise appends with ", ".
if args.addl then
if args.addl:find("^;") then
ins(args.addl)
elseif args.addl:find("^_") then
ins(" " .. args.addl:sub(2))
else
ins(", " .. args.addl)
end
end
ins("</span>")
-- Categorization: for each name type and source language, add rendering,
-- derived-from and borrowed-from categories (plus obor and diminutive/
-- augmentative variants where requested).
local categories = {}
local function inscat(cat)
table.insert(categories, lang:getFullName() .. " " .. cat)
end
for _, nametype in ipairs(args.type) do
local function insert_cats(dimaugof)
local function insert_cats_type(ty)
-- "unisex given name" also categorizes as both male and female given names.
if ty == "unisex given name" then
insert_cats_type("male given name")
insert_cats_type("female given name")
end
-- NOTE(review): the derived/borrowed categories below are re-added on every
-- recursive/diminutive pass, so duplicates can occur — presumably deduplicated
-- downstream by format_categories or MediaWiki itself; verify.
for _, source in ipairs(sources) do
inscat("renderings of " .. source:getCanonicalName() .. " " .. dimaugof .. ty .. "s")
inscat("terms derived from " .. source:getCanonicalName())
inscat("terms borrowed from " .. source:getCanonicalName())
if iargs.obor then
inscat("orthographic borrowings from " .. source:getCanonicalName())
end
if source:getCode() ~= source:getFullCode() then
-- etymology language
inscat("renderings of " .. source:getFullName() .. " " .. dimaugof .. ty .. "s")
end
end
end
insert_cats_type(nametype)
end
insert_cats("")
if args.dim then
insert_cats("diminutives of ")
end
if args.aug then
insert_cats("augmentatives of ")
end
end
return table.concat(textsegs, "") .. m_utilities.format_categories(categories, lang, args.sort, nil, force_cat)
end
return export
8vae4dokcdrk38udmaaramr6533e522
মডিউল:pron qualifier
828
70443
507805
323957
2026-04-14T08:38:09Z
Redmin
6857
[[en:Module:pron qualifier|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507805
Scribunto
text/plain
-- TODO: this module is now used for more than just pronunciations and should be renamed.
local export = {}
local labels_module = "Module:labels"
local qualifier_module = "Module:qualifier"
local references_module = "Module:references"
-- Record a hit on [[Wiktionary:Tracking/pron qualifier/PAGE]] for debugging.
-- Always returns true so calls can be chained inside boolean expressions.
local function track(page)
	local do_track = require("Module:debug/track")
	do_track("pron qualifier/" .. page)
	return true
end
--[==[
This function is used by any module that wants to add support for (some subset of) left and right regular and accent
qualifiers, labels and references to a template, e.g. for pronunciations.
It is currently used by [[Module:IPA]], [[Module:rhymes]], [[Module:hyphenation]], [[Module:homophones]],
[[Module:affix]] and various lang-specific modules such as [[Module:es-pronunc]] (for specifying pronunciation,
rhymes, hyphenation, homophones and audio in {{tl|es-pr}}). It should potentially also be used in {{tl|audio}}.
To reduce memory usage, the caller should check that any qualifiers exist before loading the module.
`data` is a structure containing the following fields:
* `q`: List of left regular qualifiers, each a string.
* `qq`: List of right regular qualifiers, each a string.
* `qualifiers`: List of qualifiers, each a string, for compatibility. If `qualifiers_right` is given, these are
right qualifiers, otherwise left qualifiers. If both `qualifiers` and `q`/`qq` (depending on the value of
`qualifiers_right`) are non-{nil}, `qualifiers` is ignored.
* `qualifiers_right`: If specified, qualifiers in `qualifiers` are placed to the right, otherwise the left. See above.
* `a`: List of left accent qualifiers, each a string.
* `aa`: List of right accent qualifiers, each a string.
* `l`: List of left labels, each a string.
* `ll`: List of right labels, each a string.
* `refs`: {nil} or a list of references or reference specs to add directly after the text; the value of a list item
is either a string containing the reference text (typically a call to a citation template such as {{tl|cite-book}}, or
a template wrapping such a call), or an object with fields `text` (the reference text), `name` (the name of the
reference, as in {{cd|<nowiki><ref name="foo">...</ref></nowiki>}} or {{cd|<nowiki><ref name="foo" /></nowiki>}})
and/or `group` (the group of the reference, as in {{cd|<nowiki><ref name="foo" group="bar">...</ref></nowiki>}} or
{{cd|<nowiki><ref name="foo" group="bar"/></nowiki>}}); this uses a parser function to format the reference
appropriately and insert a footnote number that hyperlinks to the actual reference, located in the
{{cd|<nowiki><references /></nowiki>}} section.
* `lang`: Language object for accent qualifiers.
* `text`: The text to wrap with qualifiers.
* `raw`: Don't do any CSS wrapping of the formatted text.
The order of qualifiers and labels, on both the left and right, is (1) labels, (2) accent qualifiers, (3) regular
qualifiers. This goes in order of relative importance.
]==]
function export.format_qualifiers(data)
-- Wrap `data.text` with formatted left/right labels, accent qualifiers and
-- regular qualifiers, and append any references. See the doc comment above
-- for the full description of the `data` fields. Returns the decorated text.
if not data.text then
error("Missing `data.text`; did you try to pass `text` or `qualifiers_right` as separate params?")
end
if not data.lang then
track("nolang")
end
local text = data.text
-- Format the qualifiers and labels that go either before or after the main text. They are ordered as follows, on
-- both the left and the right: (1) labels, (2) accent qualifiers, (3) regular qualifiers. This puts the different
-- types of qualifiers/labels in order of relative importance. Return nil if no qualifiers or labels, otherwise
-- a string containing all formatted qualifiers and labels surrounded by parens.
local function format_qualifier_like(labels, accent_qualifiers, qualifiers)
-- A list is "present" only if it exists and has at least one element.
local has_qualifiers = qualifiers and qualifiers[1]
local has_accent_qualifiers = accent_qualifiers and accent_qualifiers[1]
local has_labels = labels and labels[1]
if not has_qualifiers and not has_accent_qualifiers and not has_labels then
return nil
end
local qualifier_like_parts = {}
-- NOTE: ins() closes over the *variable* `qualifier_like_parts`, so after the
-- table is rebound to {} below, ins() appends into the new table.
local function ins(part)
table.insert(qualifier_like_parts, part)
end
local function format_label_like(labels, mode)
return require(labels_module).show_labels {
lang = data.lang,
labels = labels,
nocat = true,
mode = mode,
open = false,
close = false,
no_ib_content = true,
no_track_already_seen = true,
ok_to_destructively_modify = true, -- doesn't apply to `labels`
raw = data.raw,
}
end
local m_qualifier = require(qualifier_module)
if has_labels then
ins(format_label_like(labels))
end
if has_accent_qualifiers then
ins(format_label_like(accent_qualifiers, "accent"))
end
if has_qualifiers then
ins(m_qualifier.format_qualifiers {
qualifiers = qualifiers,
open = false,
close = false,
no_ib_content = true,
raw = data.raw,
})
end
local qualifier_inside
-- In raw mode, skip the CSS span wrapping entirely.
local function wrap_qualifier_css(txt, suffix)
if data.raw then
return txt
else
return m_qualifier.wrap_qualifier_css(txt, suffix)
end
end
-- Join multiple parts with a (possibly CSS-wrapped) comma; a single part
-- needs no separator.
if qualifier_like_parts[2] then
qualifier_inside = table.concat(qualifier_like_parts, wrap_qualifier_css(",", "comma") .. " ")
else
qualifier_inside = qualifier_like_parts[1]
end
-- Rebind to a fresh table and reuse ins() to assemble "(" .. content .. ")".
qualifier_like_parts = {}
ins(wrap_qualifier_css("(", "brac"))
ins(wrap_qualifier_css(qualifier_inside, "content"))
ins(wrap_qualifier_css(")", "brac"))
return table.concat(qualifier_like_parts)
end
-- References attach directly to the text, before any right-hand qualifiers.
if data.refs then
text = text .. require(references_module).format_references(data.refs)
end
-- Legacy `qualifiers` counts as left or right depending on `qualifiers_right`,
-- and is ignored when the corresponding q/qq list is given.
local leftq = format_qualifier_like(data.l, data.a, data.q or not data.qualifiers_right and data.qualifiers)
local rightq = format_qualifier_like(data.ll, data.aa, data.qq or data.qualifiers_right and data.qualifiers)
if leftq then
text = leftq .. " " .. text
end
if rightq then
text = text .. " " .. rightq
end
return text
end
return export
nkakbwx2we31h7ak7pqnd0ng4a55308
মডিউল:parse utilities
828
71729
507799
323930
2026-04-14T07:13:15Z
Redmin
6857
[[en:Module:parse utilities|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507799
Scribunto
text/plain
local export = {}
local fun_is_callable_module = "Module:fun/isCallable"
local languages_module = "Module:languages"
local parameters_module = "Module:parameters"
local string_char_module = "Module:string/char"
local string_utilities_module = "Module:string utilities"
local table_insert_if_not_module = "Module:table/insertIfNot"
-- Localized standard-library and Scribunto functions: locals are faster to
-- access than globals in hot code paths.
local assert = assert
local concat = table.concat
local dump = mw.dumpObject
local error = error
local insert = table.insert
local ipairs = ipairs
local list_to_text = mw.text.listToText
local pairs = pairs
local require = require
local sort = table.sort
local type = type
local ugsub = mw.ustring.gsub
-- Lazy loaders: each stub replaces itself with the real function on first
-- call, so the underlying module is only require()d when actually needed
-- (reduces memory usage when the function is never called).
local function convert_val(...)
convert_val = require(parameters_module).convert_val
return convert_val(...)
end
local function get_lang(...)
get_lang = require(languages_module).getByCode
return get_lang(...)
end
local function insert_if_not(...)
insert_if_not = require(table_insert_if_not_module)
return insert_if_not(...)
end
local function is_callable(...)
is_callable = require(fun_is_callable_module)
return is_callable(...)
end
local function split(...)
split = require(string_utilities_module).split
return split(...)
end
local function u(...)
u = require(string_char_module)
return u(...)
end
local function umatch(...)
umatch = require(string_utilities_module).match
return umatch(...)
end
--[==[ intro:
In order to understand the following parsing code, you need to understand how inflected text specs work. They are
intended to work with inflected text where individual words to be inflected may be followed by inflection specs in
angle brackets. The format of the text inside of the angle brackets is up to the individual language and part-of-speech
specific implementation. A real-world example is as follows: `<nowiki>[[медичний|меди́чна]]<+> [[сестра́]]<*,*#.pr></nowiki>`.
This is the inflection of the Ukrainian multiword expression {{m|uk|меди́чна сестра́||nurse|lit=medical sister}},
consisting of two words: the adjective {{m|uk|меди́чна||medical|pos=feminine singular}} and the noun {{m|uk|сестра́||sister}}.
The specs in angle brackets follow each word to be inflected; for example, `<+>` means that the preceding word should be
declined as an adjective.
The code below works in terms of balanced expressions, which are bounded by delimiters such as `< >` or `[ ]`. The
intention is to allow separators such as spaces to be embedded inside of delimiters; such embedded separators will not
be parsed as separators. For example, Ukrainian noun specs allow footnotes in brackets to be inserted inside of angle
brackets; something like `меди́чна<+> сестра́<pr.[this is a footnote]>` is legal, as is
`<nowiki>[[медичний|меди́чна]]<+> [[сестра́]]<pr.[this is an <i>italicized footnote</i>]></nowiki>`, and the parsing code
should not be confused by the embedded brackets, spaces or angle brackets.
The parsing is done by two functions, which work in close concert: {parse_balanced_segment_run()} and
{split_alternating_runs()}. To illustrate, consider the following:
{parse_balanced_segment_run("foo<M.proper noun> bar<F>", "<", ">")} =<br />
{ {"foo", "<M.proper noun>", " bar", "<F>", ""}}
then
{split_alternating_runs({"foo", "<M.proper noun>", " bar", "<F>", ""}, " ")} =<br />
{ {{"foo", "<M.proper noun>", ""}, {"bar", "<F>", ""}}}
Here, we start out with a typical inflected text spec `foo<M.proper noun> bar<F>`, call {parse_balanced_segment_run()} on
it, and call {split_alternating_runs()} on the result. The output of {parse_balanced_segment_run()} is a list where
even-numbered segments are bounded by the bracket-like characters passed into the function, and odd-numbered segments
consist of the surrounding text. {split_alternating_runs()} is called on this, and splits '''only''' the odd-numbered
segments, grouping all segments between the specified character. Note that the inner lists output by
{split_alternating_runs()} are themselves in the same format as the output of {parse_balanced_segment_run()}, with
bracket-bounded text in the even-numbered segments. Hence, such lists can be passed again to {split_alternating_runs()}.
]==]
--[==[
Parse a string containing matched instances of parens, brackets or the like. Return a list of strings, alternating
between textual runs not containing the open/close characters and runs beginning and ending with the open/close
characters. For example,
{parse_balanced_segment_run("foo(x(1)), bar(2)", "(", ")") = {"foo", "(x(1))", ", bar", "(2)", ""}}
]==]
-- Split `segment_run` into alternating runs of plain text (odd indices) and
-- balanced `open`...`close` groups (even indices); see the doc comment above.
function export.parse_balanced_segment_run(segment_run, open, close)
	-- %bxy matches a balanced run bounded by the two delimiter characters;
	-- capturing it makes split() keep the delimited runs in the output.
	local balanced_capture = "(%b" .. open .. close .. ")"
	return split(segment_run, balanced_capture)
end
-- The following is an equivalent, older implementation that does not use %b (written before I was aware of %b).
--[=[
function export.parse_balanced_segment_run(segment_run, open, close)
local break_on_open_close = split(segment_run, "([%" .. open .. "%" .. close .. "])")
local text_and_specs = {}
local level = 0
local seg_group = {}
for i, seg in ipairs(break_on_open_close) do
if i % 2 == 0 then
if seg == open then
insert(seg_group, seg)
level = level + 1
else
assert(seg == close)
insert(seg_group, seg)
level = level - 1
if level < 0 then
error("Unmatched " .. close .. " sign: '" .. segment_run .. "'")
elseif level == 0 then
insert(text_and_specs, concat(seg_group))
seg_group = {}
end
end
elseif level > 0 then
insert(seg_group, seg)
else
insert(text_and_specs, seg)
end
end
if level > 0 then
error("Unmatched " .. open .. " sign: '" .. segment_run .. "'")
end
return text_and_specs
end
]=]
--[==[
Like parse_balanced_segment_run() but accepts multiple sets of delimiters. For example,
{parse_multi_delimiter_balanced_segment_run("foo[bar(baz[bat])], quux<glorp>", {{"[", "]"}, {"(", ")"}, {"<", ">"}}) =
{"foo", "[bar(baz[bat])]", ", quux", "<glorp>", ""}}.
Each element in the list of delimiter pairs is a string specifying an equivalence class of possible delimiter
characters. You can use this, for example, to allow either "[" or "&#91;" to be treated equivalently, with either
one closed by either "]" or "&#93;". To do this, first replace "&#91;" and "&#93;" with single Unicode
characters such as U+FFF0 and U+FFF1, and then specify a two-character string containing "[" and U+FFF0 as the opening
delimiter, and a two-character string containing "]" and U+FFF1 as the corresponding closing delimiter.
If `no_error_on_unmatched` is given and an error is found during parsing, a string is returned containing the error
message instead of throwing an error.
]==]
function export.parse_multi_delimiter_balanced_segment_run(segment_run, delimiter_pairs, no_error_on_unmatched)
-- See the doc comment above. Returns a list of alternating text/delimited
-- runs, or (when `no_error_on_unmatched` is set) an error-message string on
-- unbalanced input.
-- Build pattern-escaped open/close classes and a map from each open class to
-- its corresponding close class.
local escaped_delimiter_pairs = {}
local open_to_close_map = {}
local open_close_items = {}
local open_items = {}
for _, open_close in ipairs(delimiter_pairs) do
local open, close = open_close[1], open_close[2]
-- Escape characters that are magic inside a pattern character class.
open = open:gsub("([%[%]%%%%-])", "%%%1")
close = close:gsub("([%[%]%%%%-])", "%%%1")
insert(open_close_items, open)
insert(open_close_items, close)
insert(open_items, open)
open = "[" .. open .. "]"
close = "[" .. close .. "]"
open_to_close_map[open] = close
insert(escaped_delimiter_pairs, {open, close})
end
local open_close_pattern = "([" .. concat(open_close_items) .. "])"
local open_pattern = "([" .. concat(open_items) .. "])"
-- After split(), even-numbered elements are single delimiter characters and
-- odd-numbered elements are the text between them.
local break_on_open_close = split(segment_run, open_close_pattern)
local text_and_specs = {}
local level = 0
local seg_group = {}
-- The open-class pattern of the delimiter pair currently being matched; only
-- delimiters of this pair affect nesting depth until it closes.
local open_at_level_zero
for i, seg in ipairs(break_on_open_close) do
if i % 2 == 0 then
insert(seg_group, seg)
if level == 0 then
-- At top level, only an opening delimiter is legal here.
if not umatch(seg, open_pattern) then
local errmsg = "Unmatched close sign " .. seg .. ": '" .. segment_run .. "'"
if no_error_on_unmatched then
return errmsg
else
error(errmsg)
end
end
assert(open_at_level_zero == nil)
for _, open_close in ipairs(escaped_delimiter_pairs) do
local open = open_close[1]
if umatch(seg, open) then
open_at_level_zero = open
break
end
end
if open_at_level_zero == nil then
error(("Internal error: Segment %s didn't match any open regex"):format(seg))
end
level = level + 1
elseif umatch(seg, open_at_level_zero) then
level = level + 1
elseif umatch(seg, open_to_close_map[open_at_level_zero]) then
level = level - 1
assert(level >= 0)
if level == 0 then
-- Outermost delimiter closed: emit the whole group as one run.
insert(text_and_specs, concat(seg_group))
seg_group = {}
open_at_level_zero = nil
end
end
elseif level > 0 then
insert(seg_group, seg)
else
insert(text_and_specs, seg)
end
end
if level > 0 then
local errmsg = "Unmatched open sign " .. open_at_level_zero .. ": '" .. segment_run .. "'"
if no_error_on_unmatched then
return errmsg
else
error(errmsg)
end
end
return text_and_specs
end
--[==[
Check whether a term contains top-level HTML. We want to distinguish inline modifiers from HTML. We assume an inline
modifier is either a boolean modifier like `<bor>` or a prefix modifier like `<tr:Miryem>`. All other things inside of
angle brackets, e.g. `<nowiki><span class="foo"></nowiki>`, `<nowiki></span></nowiki>`, `<nowiki><br/></nowiki>`, etc.,
should be flagged as HTML (typically caused by wrapping an argument in {{tl|m|...}}, {{tl|af|...}} or similar, but
sometimes specified directly, e.g. `<nowiki><sup>6</sup></nowiki>`). By default, we assume the tag in an inline modifier
contains either letters, numbers, hyphens or underscore (but not spaces), and must either stand alone or be followed by
a colon, leading to a default HTML-checking pattern of {"<[%w_%-]*[^%w_%-:>]"}. But this can be modified; e.g.
[[Module:tl-pronunciation]] allows modifiers of the form `<<var>pos</var>^<var>defn</var>>` or
`<<var>pos</var>,<var>pos</var>,<var>pos</var>^<var>defn</var>>`, and would need to use its own HTML pattern. It's
important we restrict the check for HTML to top-level to allow for generated HTML inside of e.g. qualifier tags, such as
`<nowiki>foo<q:similar to {{m|fr|bar}}></nowiki>`.
]==]
-- Return true if `term` contains top-level HTML (as opposed to inline
-- modifiers like <tr:...>); see the doc comment above for the rationale
-- behind the default `html_pattern`.
function export.term_contains_top_level_html(term, html_pattern)
	if html_pattern == nil then
		html_pattern = "<[%w_%-]*[^%w_%-:>]"
	end
	-- Cheap rejection: if nothing HTML-like appears anywhere, there is
	-- certainly none at top level.
	if not term:find(html_pattern) then
		return false
	end
	-- Even-numbered runs are the top-level <...>-delimited spans; flag any
	-- whose start looks like HTML rather than an inline modifier.
	local runs = export.parse_balanced_segment_run(term, "<", ">")
	local anchored_pattern = "^" .. html_pattern
	local i = 2
	while i <= #runs do
		if runs[i]:find(anchored_pattern) then
			return true
		end
		i = i + 2
	end
	return false
end
--[==[
Check whether a term appears to have already been passed through `full_link()`. Passing it again will mangle it in
various ways; at best it will have unnecessary lang/script wrapping, which might do nothing but might result in
overly large fonts or other issues. We also check for uses of {{tl|ja-r/args}}, {{tl|ryu-r/args}} or {{tl|ko-l/args}},
which will be mangled by `full_link()`. If this check succeeds, use the text raw instead of passing through
`full_link()`.
]==]
-- Heuristic check for text that has already been through full_link():
-- a <span> indicates generated link markup, and the template patterns catch
-- uses of {{ja-r/args}}, {{ryu-r/args}} or {{ko-l/args}}. Returns a truthy
-- value (the match position) if found, otherwise nil.
function export.term_already_linked(term)
	local markers = {"<span", "{{ja%-r|", "{{ryu%-r|", "{{ko%-l|"}
	for _, marker in ipairs(markers) do
		local pos = term:find(marker)
		if pos then
			return pos
		end
	end
	return nil
end
--[==[
Split a list of alternating textual runs of the format returned by `parse_balanced_segment_run` on `splitchar`. This
only splits the odd-numbered textual runs (the portions between the balanced open/close characters). The return value
is a list of lists, where each list contains an odd number of elements, where the even-numbered elements of the sublists
are the original balanced textual run portions. For example, if we do
{parse_balanced_segment_run("foo<M.proper noun> bar<F>", "<", ">") =
{"foo", "<M.proper noun>", " bar", "<F>", ""}}
then
{split_alternating_runs({"foo", "<M.proper noun>", " bar", "<F>", ""}, " ") =
{{"foo", "<M.proper noun>", ""}, {"bar", "<F>", ""}}}
Note that we did not touch the text "<M.proper noun>" even though it contains a space in it, because it is an
even-numbered element of the input list. This is intentional and allows for embedded separators inside of
brackets/parens/etc. Note also that the inner lists in the return value are of the same form as the input list (i.e.
they consist of alternating textual runs where the even-numbered segments are balanced runs), and can in turn be passed
to split_alternating_runs().
If `preserve_splitchar` is passed in, the split character is included in the output, as follows:
{split_alternating_runs({"foo", "<M.proper noun>", " bar", "<F>", ""}, " ", true) =
{{"foo", "<M.proper noun>", ""}, {" "}, {"bar", "<F>", ""}}}
Consider what happens if the original string has multiple spaces between brackets, and multiple sets of brackets
without spaces between them.
{parse_balanced_segment_run("foo[dated][low colloquial] baz-bat quux xyzzy[archaic]", "[", "]") =
{"foo", "[dated]", "", "[low colloquial]", " baz-bat quux xyzzy", "[archaic]", ""}}
then
{split_alternating_runs({"foo", "[dated]", "", "[low colloquial]", " baz-bat quux xyzzy", "[archaic]", ""}, "[ %-]") =
{{"foo", "[dated]", "", "[low colloquial]", ""}, {"baz"}, {"bat"}, {"quux"}, {"xyzzy", "[archaic]", ""}}}
If `preserve_splitchar` is passed in, the split character is included in the output,
as follows:
{split_alternating_runs({"foo", "[dated]", "", "[low colloquial]", " baz bat quux xyzzy", "[archaic]", ""}, "[ %-]", true) =
{{"foo", "[dated]", "", "[low colloquial]", ""}, {" "}, {"baz"}, {"-"}, {"bat"}, {" "}, {"quux"}, {" "}, {"xyzzy", "[archaic]", ""}}}
As can be seen, the even-numbered elements in the outer list are one-element lists consisting of the separator text.
]==]
-- Group an alternating run list on `splitchar`, splitting only the
-- odd-numbered (raw-text) segments; see the doc comment above for examples.
-- With `preserve_splitchar`, separator matches become their own one-element
-- groups at even positions of the result.
function export.split_alternating_runs(segment_runs, splitchar, preserve_splitchar)
	local grouped_runs = {}
	local current = {}
	local split_pattern = splitchar
	if preserve_splitchar then
		-- Capturing the separator makes split() keep it in the output.
		split_pattern = "(" .. splitchar .. ")"
	end
	for i = 1, #segment_runs do
		local seg = segment_runs[i]
		if i % 2 == 0 then
			-- Balanced (delimited) runs are never split.
			current[#current + 1] = seg
		else
			local pieces = split(seg, split_pattern)
			current[#current + 1] = pieces[1]
			-- Each further piece closes the current group and starts a new one.
			for j = 2, #pieces do
				grouped_runs[#grouped_runs + 1] = current
				current = {pieces[j]}
			end
		end
	end
	if current[1] ~= nil then
		grouped_runs[#grouped_runs + 1] = current
	end
	return grouped_runs
end
--[==[
After calling `parse_multi_delimiter_balanced_segment_run()`, rejoin delimiter-bounded textual runs (i.e. textual runs
surrounded by certain matched delimiters) with the runs on either side. This can be used when some of the matched
delimiters are specified only in order to ensure that delimiters inside of other delimiters aren't parsed. As an
example, [[Module:object usage]] calls
{m_parse_utilities.parse_multi_delimiter_balanced_segment_run(object, {{"[", "]"}, {"(", ")"}, {"<", ">"}})} but the
actual syntax of {{tl|+obj}} only uses parens and angle brackets as delimiters. Square brackets are included so that
internal links are treated as units (i.e. parens and angle brackets occurring inside of them aren't parsed), but beyond
that we don't treat square brackets as delimiters, so we want to rejoin square-bracket-delimited textual runs with
adjacent runs before further parsing.
There are two primary workflows when using this function:
# If you only care about balanced delimiters occurring inside of other balanced delimiters (e.g. in the above example
with [[Module:object usage]], you can call `rejoin_delimited_runs()` directly after
`parse_multi_delimiter_balanced_segment_run()`.
# However, if you care about single delimiters such as commas and slashes occurring inside of balanced delimiters (e.g.
if you allow multiple comma-separated terms, e.g. of which can have associated inline modifiers, and you don't want
commas inside of internal links to be treated as delimiters), you need to call `rejoin_delimited_runs()` ''after''
calling `split_alternating_runs()`. This is used, for example, in `parse_inline_modifiers()` for exactly this reason,
when a `splitchar` is provided.
`data` is an object of properties. Currently there are two: `runs` (the output of calling
`parse_multi_delimiter_balanced_segment_run()`, i.e. a list of textual runs, where even-numbered elements begin and end
with a matched delimiter and odd-numbered elements are surrounding text) and `delimiter_pattern` (a Lua pattern matching
delimited textual runs that we want to rejoin with the surrounding text). `delimiter_pattern` should normally be
anchored at the beginning; e.g. {"^%["} would be the correct pattern to use when rejoining square-bracket-delimited
textual runs, as described above.
]==]
-- Rejoin delimited runs matching `data.delimiter_pattern` with the text on
-- either side; see the doc comment above. `data.runs` is the output of
-- parse_multi_delimiter_balanced_segment_run().
function export.rejoin_delimited_runs(data)
	local runs = data.runs
	local pattern = data.delimiter_pattern
	local joined = {}
	local i = 1
	while i <= #runs do
		local run = runs[i]
		if i % 2 == 0 and run:find(pattern) then
			-- Glue this delimited run and the following text run onto the
			-- previously emitted element, consuming both.
			joined[#joined] = joined[#joined] .. run .. runs[i + 1]
			i = i + 2
		else
			joined[#joined + 1] = run
			i = i + 1
		end
	end
	return joined
end
-- Trim leading and trailing whitespace (Unicode-aware, via mw.ustring.gsub).
-- The outer parentheses truncate gsub's second return value (the match count)
-- so exactly one string is returned.
function export.strip_spaces(text)
return (ugsub(text, "^%s*(.-)%s*$", "%1"))
end
--[==[
Apply an arbitrary function `frob` to the "raw-text" segments in a split run set (the output of
split_alternating_runs()). We leave alone stuff within balanced delimiters (footnotes, inflection specs and the
like), as well as splitchars themselves if present. `preserve_splitchar` indicates whether splitchars are present
in the split run set. `frob` is a function of one argument (the string to frob) and should return one argument (the
frobbed string). We operate by only frobbing odd-numbered segments, and only in odd-numbered runs if
preserve_splitchar is given.
]==]
-- Apply `frob` in place to every raw-text segment of a split run set (the
-- output of split_alternating_runs()). Balanced runs, and separator runs when
-- `preserve_splitchar` is set, are left untouched. See the doc comment above.
function export.frob_raw_text_alternating_runs(split_run_set, frob, preserve_splitchar)
	for i = 1, #split_run_set do
		-- With preserve_splitchar, even-numbered outer elements are separators.
		local is_separator_run = preserve_splitchar and i % 2 == 0
		if not is_separator_run then
			local run = split_run_set[i]
			-- Odd-numbered inner segments are raw text; even ones are balanced runs.
			for j = 1, #run, 2 do
				run[j] = frob(run[j])
			end
		end
	end
end
--[==[
Like split_alternating_runs() but applies an arbitrary function `frob` to "raw-text" segments in the result (i.e.
not stuff within balanced delimiters such as footnotes and inflection specs, and not splitchars if present). `frob`
is a function of one argument (the string to frob) and should return one argument (the frobbed string).
]==]
-- Split an alternating run on `splitchar`, then apply `frob` to the raw-text
-- segments of the result in place; see the doc comment above.
function export.split_alternating_runs_and_frob_raw_text(run, splitchar, frob, preserve_splitchar)
	local result = export.split_alternating_runs(run, splitchar, preserve_splitchar)
	export.frob_raw_text_alternating_runs(result, frob, preserve_splitchar)
	return result
end
--[==[
FIXME: Older entry point. Call `split_alternating_runs_and_frob_raw_text()` in [[Module:parse utilities]] directly.
Like `split_alternating_runs()` but strips spaces from both ends of the odd-numbered elements (only in odd-numbered runs
if `preserve_splitchar` is given). Effectively we leave alone the footnotes and splitchars themselves, but otherwise
strip extraneous spaces. Spaces in the middle of an element are also left alone.
]==]
-- Older entry point: split on `splitchar` and trim whitespace from every
-- raw-text segment of the result; see the doc comment above.
function export.split_alternating_runs_and_strip_spaces(segment_runs, splitchar, preserve_splitchar)
	return export.split_alternating_runs_and_frob_raw_text(
		segment_runs, splitchar, export.strip_spaces, preserve_splitchar)
end
--[==[
Split the non-modifier parts of an alternating run (the output of parse_balanced_segment_run()) on the Lua
pattern `splitchar`, while protecting certain sequences involving the pattern's characters (e.g.
comma+whitespace). `preserve_splitchar` keeps the delimiters in the output, as in split_alternating_runs().
`escape_fun` is applied beforehand to each raw-text run and must return two values: the escaped run and a flag
saying whether unescaping is required. If any run needs it, `unescape_fun` is applied to every raw-text run
after the split on `splitchar`. The return value has the same form as that of split_alternating_runs().
]==]
function export.split_alternating_runs_escaping(run, splitchar, preserve_splitchar, escape_fun, unescape_fun)
	-- Escape protected sequences in the raw-text (odd-indexed) segments, remembering whether any
	-- segment will need a corresponding unescape after splitting.
	local must_unescape = false
	if escape_fun then
		for i = 1, #run, 2 do
			local seg_needs_unescape
			run[i], seg_needs_unescape = escape_fun(run[i])
			must_unescape = must_unescape or seg_needs_unescape
		end
	end
	if must_unescape then
		return export.split_alternating_runs_and_frob_raw_text(run, splitchar, unescape_fun, preserve_splitchar)
	end
	return export.split_alternating_runs(run, splitchar, preserve_splitchar)
end
--[==[
Protect commas that should not act as delimiters by replacing them with the temporary character `tempcomma`
(default U+FFF0): backslash-escaped commas (the backslash is dropped, its protective duty done) and commas
directly followed by whitespace. Returns the modified text plus a flag indicating whether anything changed.
]==]
function export.escape_comma_whitespace(run, tempcomma)
	tempcomma = tempcomma or u(0xFFF0)
	local changed = false
	if run:find("\\,") then
		-- FIXME: we should probably convert literal \\ to \ to allow people to put a backslash before a comma that
		-- should be passed through; but maybe it's enough to use an HTML escape for the comma or backslash.
		-- Discard the backslash before the comma; it has done its duty protecting the comma.
		run = (run:gsub("\\,", tempcomma))
		changed = true
	end
	if run:find(",%s") then
		run = (run:gsub(",(%s)", tempcomma .. "%1"))
		changed = true
	end
	return run, changed
end
--[==[
Reverse the transformation done by `escape_comma_whitespace()`: convert the temporary character (default
U+FFF0) back into a comma.
]==]
function export.unescape_comma_whitespace(run, tempcomma)
	return (run:gsub(tempcomma or u(0xFFF0), ","))
end
--[==[
Split the non-modifier parts of an alternating run (the output of parse_balanced_segment_run()) on commas,
except commas directly followed by whitespace. See `split_on_comma()` for more information and the meaning of
`tempcomma`.
]==]
function export.split_alternating_runs_on_comma(run, tempcomma)
	tempcomma = tempcomma or u(0xFFF0)
	return export.split_alternating_runs_escaping(run, ",", false,
		-- Protect comma + whitespace (and backslash-escaped commas) with the temporary char.
		function(seg) return export.escape_comma_whitespace(seg, tempcomma) end,
		-- Convert the temporary char back into a comma.
		function(seg) return export.unescape_comma_whitespace(seg, tempcomma) end)
end
--[==[
Split `text` on the Lua pattern `splitchar`, but not inside certain protected sequences involving the
pattern's characters (e.g. comma+whitespace) and not inside balanced square or angle brackets.
`preserve_splitchar` keeps the matched delimiters interspersed between the split pieces. `escape_fun` is
applied to the text beforehand and must return two values: the escaped text and a flag saying whether
unescaping is needed; if so, `unescape_fun` is applied to each piece of text after splitting on `splitchar`.
The return value is a list of pieces, with delimiters interleaved when `preserve_splitchar` is specified.
]==]
function export.split_escaping(text, splitchar, preserve_splitchar, escape_fun, unescape_fun)
	-- Fast path: nothing to split on.
	if not umatch(text, splitchar) then
		return {text}
	end
	-- When square or angle brackets are present, delimiters inside them must not cause a split. We parse the
	-- balanced brackets with parse_multi_delimiter_balanced_segment_run(), split only the unbracketed portions
	-- using split_alternating_runs_escaping(), and concatenate each group back into a plain string. We ask the
	-- parser not to throw on unbalanced brackets; in that case it returns an error string and we fall through
	-- to the bracketless logic below.
	if text:find("[%[<]") then
		local runs = export.parse_multi_delimiter_balanced_segment_run(text, {{"[", "]"}, {"<", ">"}},
			"no error on unmatched")
		if type(runs) ~= "string" then
			local groups = export.split_alternating_runs_escaping(runs, splitchar, preserve_splitchar,
				escape_fun, unescape_fun)
			for i = 1, #groups do
				groups[i] = concat(groups[i])
			end
			return groups
		end
	end
	-- No brackets (or unbalanced ones): escape the protected sequences, split, then unescape each piece.
	local need_unescape
	if escape_fun then
		text, need_unescape = escape_fun(text)
	end
	local pattern = preserve_splitchar and "(" .. splitchar .. ")" or splitchar
	local pieces = split(text, pattern)
	if need_unescape then
		local step = preserve_splitchar and 2 or 1
		for i = 1, #pieces, step do
			pieces[i] = unescape_fun(pieces[i])
		end
	end
	return pieces
end
--[==[
Split text on commas, but not on commas directly followed by whitespace. This resembles
`mw.text.split(text, ",")` except that it will not split on comma+whitespace, so as to handle embedded commas
in terms (which are almost always followed by a space). `tempcomma` is the Unicode character used temporarily
while splitting; normally U+FFF0, but a different character can be supplied if U+FFF0 is used for some internal
purpose.
]==]
function export.split_on_comma(text, tempcomma)
	-- Fast path when there is no comma at all. split_escaping() makes a similar check, so with a comma present
	-- this test effectively runs twice, but optimizing the common no-comma case is worth it.
	if not text:find(",") then
		return {text}
	end
	tempcomma = tempcomma or u(0xFFF0)
	return export.split_escaping(text, ",", false,
		-- Protect comma + whitespace with the temporary char.
		function(run) return export.escape_comma_whitespace(run, tempcomma) end,
		-- Convert the temporary char back into a comma.
		function(run) return export.unescape_comma_whitespace(run, tempcomma) end)
end
--[==[
Make Wikicode (template calls, bracketed links, HTML, bold/italics, etc.) display literally in error messages
by appending a Unicode word-joiner (U+2060) to every character that could trigger Wikicode interpretation.
Replacing with equivalent HTML escapes doesn't work because they are displayed literally; neither does wrapping
in <nowiki>...</nowiki> (the tags display literally), using {{#tag:nowiki|...}} (same thing), or calling
mw.getCurrentFrame():extensionTag("nowiki", ...) (everything gets converted to a strip marker such as
`UNIQ--nowiki-00000000-QINU`). FIXME: This is a massive hack; there must be a better way.
]==]
function export.escape_wikicode(term)
	return (term:gsub("([%[<'{])", "%1" .. u(0x2060)))
end
-- Return an error-reporting function for the argument described by `arg_gloss`. The returned function takes a
-- message and an optional number of stack frames to ignore, and throws an error combining the message with
-- `arg_gloss`, with Wikicode escaped so that it displays literally.
function export.make_parse_err(arg_gloss)
	return function(msg, stack_frames_to_ignore)
		local full_msg = ("%s: %s"):format(msg, arg_gloss)
		error(export.escape_wikicode(full_msg), stack_frames_to_ignore)
	end
end
-- Parse a term that may be a link '[[LINK]]' or a two-part link '[[LINK|DISPLAY]]', returning the link and
-- display parts. FIXME: Doesn't currently handle embedded links like '[[FOO]] [[BAR]]' or
-- '[[FOO|BAR]] [[BAZ]]' or '[[FOO]]s'; if such links are detected, the term is returned unchanged along with
-- `nil` for the display form.
local function parse_bracketed_term(term, parse_err)
	local inside = term:match("^%[%[(.*)%]%]$")
	if not inside then
		-- Not a bracketed term at all.
		return term, nil
	end
	if inside:find("%[%[") or inside:find("%]%]") then
		-- Embedded links, e.g. '[[FOO]] [[BAR]]'; FIXME: we should process them properly.
		return term, nil
	end
	local parts = split(inside, "|")
	if #parts > 2 then
		parse_err("Saw more than two parts inside a bracketed link")
	end
	return parts[1], parts[2]
end
--[==[
Parse a term that may have a language code (or possibly multiple plus-separated language codes, if
`data.allow_multiple` is given) preceding it (e.g. {la:minūtia} or {grc:[[σκῶρ|σκατός]]} or
{nan-hbl+hak:[[毋]][[知]]}). Return five arguments:
# the original prefixed term; in the case of a Wikipedia or Wikisource prefix followed by a two-part link, it is a
two-part link with the Wikipedia/Wikisource prefix moved inside the link; in the case of a Wikipedia or Wikisource
prefix followed by a redundant one-part link, the brackets are removed;
# the language object corresponding to the language code (possibly a family object if `data.allow_family` is given), or
a list of such objects if `data.allow_multiple` is given;
# the link if the unprefixed term is of the form <code>[[<var>link</var>|<var>display</var>]]</code> or of the form
<code>[[<var>link</var>]]</code>, otherwise the full unprefixed term;
# the display part if the term is of the form <code>[[<var>link</var>|<var>display</var>]]</code> or has a Wikipedia or
Wikisource prefix (in which case the part minus the prefix and any following language code will be returned, with
redundant brackets stripped), else {nil};
# {true} if the term has a Wikipedia/Wikisource prefix, else {false}.
Etymology-only languages are always allowed. This function also correctly handles Wikipedia prefixes (e.g.
{w:Abatemarco} or {w:it:Colle Val d'Elsa} or {lw:ru:Филарет}) and Wikisource prefixes (e.g. {s:Twelve O'Clock} or
{s:[[Walden/Chapter XVIII|Walden]]} or {s:fr:Perceval ou le conte du Graal} or {s:ro:[[Domnul Vucea|Mr. Vucea]]} or
{ls:ko:이상적 부인} or {ls:ko:[[조선 독립의 서#一. 槪論|조선 독립의 서]]}) and converts them into two-part links,
with the display form not including the Wikipedia or Wikisource prefix unless it was explicitly specified using a
two-part link as in {lw:ru:[[Филарет (Дроздов)|Митрополи́т Филаре́т]]} or
{ls:ko:[[조선 독립의 서#一. 槪論|조선 독립의 서]]}. The difference between {w:} ("Wikipedia") and {lw:} ("Wikipedia
link") is that the latter requires a language code and returns the corresponding language object; same for the
difference between {s:} ("Wikisource") and {ls:} ("Wikisource link").
NOTE: Embedded links are not correctly handled currently. If an embedded link is detected, the whole term is returned
as the link part (third argument), and the display part is nil. If you construct your own link from the link and
display parts, you must check for this.
The calling convention is to pass in a single argument `data` containing the following fields:
* `term`: The term to parse.
* `parse_err`: An optional function of one or two arguments to display an error. (The second argument to the function is
the number of stack frames to ignore when calling error(); if you declare your error function with only one argument,
things will still work fine.)
* `paramname`: If `parse_err` is omitted, this should be a string naming a parameter to display in the error message,
along with the term in question, and will be used to generate a `parse_err` function using `make_parse_err()`. (If
`paramname` is omitted, just the term itself appears in the error message.)
* `allow_multiple`: Allow multiple plus-separated language codes, e.g. {nan-hbl+hak:[[毋]][[知]]}. See above.
* `allow_family`: Allow family objects to appear in place of language codes.
* `allow_bad`: Don't throw an error on invalid language code prefixes; instead, include the prefix and colon as part of
the term. Note that if a prefix doesn't look like a language code (e.g. if it's a number), the code won't even try to
parse it as a language code, regardless of the `allow_bad` setting, but will always include it in the term.
* `lang_cache`: A table mapping language codes to language objects. If the value is `false`, the language code is
invalid. If specified, the cache will be consulted before calling `getByCode()` in [[Module:languages]], and the
result cached. If not specified, no cache will be used.
]==]
function export.parse_term_with_lang(data)
local term = data.term
-- Build the error reporter: prefer a caller-supplied one, else derive one from `paramname` (if given) or from
-- the raw term.
local parse_err = data.parse_err or
data.paramname and export.make_parse_err(("%s=%s"):format(data.paramname, term)) or
export.make_parse_err(term)
-- Parse off an initial language code (e.g. 'la:minūtia' or 'grc:[[σκῶρ|σκατός]]'). First check for Wikipedia
-- prefixes ('w:Abatemarco' or 'w:it:Colle Val d'Elsa' or 'lw:zh:邹衡') and Wikisource prefixes
-- ('s:ro:[[Domnul Vucea|Mr. Vucea]]' or 'ls:ko:이상적 부인'). Wikipedia/Wikisource language codes follow a similar
-- format to Wiktionary language codes (see below). Here and below we don't parse if there's a space after the
-- colon (happens e.g. if the user uses {{desc|...}} inside of {{col}}, grrr ...).
local termlang, foreign_wiki, actual_term = term:match("^(l?[ws]):([a-z][a-z][a-z-]*):([^ ].*)$")
if not termlang then
-- No foreign-wiki language code; try a plain 'w:' or 's:' prefix.
termlang, actual_term = term:match("^([ws]):([^ ].*)$")
end
if termlang then
-- Wikipedia/Wikisource prefix found. 'lw:'/'ls:' ("link" variants) additionally carry a Wiktionary language.
local wiki_links = termlang:find("^l")
local base_wiki_prefix = termlang:find("w$") and "w:" or "s:"
local wiki_prefix = base_wiki_prefix .. (foreign_wiki and foreign_wiki .. ":" or "")
local link, display = parse_bracketed_term(actual_term, parse_err)
if link:find("%[%[") or display and display:find("%[%[") then
-- FIXME, this should be handlable with the right parsing code
parse_err("Cannot have embedded brackets following a Wikipedia (w:... or lw:...) link; expand the term to a fully bracketed term w:[[LINK|DISPLAY]] or similar")
end
local lang = wiki_links and get_lang(foreign_wiki, parse_err, "allow etym") or nil
local prefixed_link = wiki_prefix .. link
if display then
return ("[[%s|%s]]"):format(prefixed_link, display), lang, prefixed_link, display, true
else
-- Return the link minus any language codes as the fourth term (display form). Previously we returned `actual_term`
-- but this causes problems with redundant Wikipedia links of the form `w:[[Dragon Ball Z]]`. Don't generate a
-- two-part link so you can specify a display form in 3=. Note that the fourth and fifth params are currently only
-- used in [[Module:quote]].
return prefixed_link, lang, prefixed_link, link, true
end
end
-- Wiktionary language codes are in one of the following formats, where 'x' is a lowercase letter and 'X' an
-- uppercase letter:
-- xx
-- xxx
-- xxx-xxx
-- xxx-xxx-xxx (esp. for protolanguages)
-- xx-xxx (for etymology-only languages)
-- xx-xxx-xxx (maybe? for etymology-only languages)
-- xx-XX (for etymology-only languages, where XX is a country code, e.g. en-US)
-- xxx-XX (for etymology-only languages, where XX is a country code)
-- xx-xxx-XX (for etymology-only languages, where XX is a country code)
-- xxx-xxx-XX (for etymology-only languages, where XX is a country code, e.g. nan-hbl-PH)
-- Things like xxx-x+ (e.g. cmn-pinyin, cmn-tongyong)
-- VL., LL., etc.
--
-- We check for the nonstandard Latin etymology language codes separately, and otherwise make only the following
-- assumptions:
-- (1) There are one to three hyphen-separated components.
-- (2) The last component can consist of two uppercase ASCII letters; otherwise, all components contain only
-- lowercase ASCII letters.
-- (3) Each component must have at least two letters.
-- (4) The first component must have two or three letters.
local function is_possible_lang_code(code)
-- Special hack for Latin variants, which can have nonstandard etym codes, e.g. VL., LL.
if code:find("^[A-Z]L%.$") then
return true
end
return code:find("^([a-z][a-z][a-z]?)$") or
code:find("^[a-z][a-z][a-z]?%-[A-Z][A-Z]$") or
code:find("^[a-z][a-z][a-z]?%-[a-z][a-z]+$") or
code:find("^[a-z][a-z][a-z]?%-[a-z][a-z]+%-[A-Z][A-Z]$") or
code:find("^[a-z][a-z][a-z]?%-[a-z][a-z]+%-[a-z][a-z]+$")
end
-- Look up a language code, consulting/updating `data.lang_cache` if given. In the cache, `false` marks a
-- known-invalid code (distinct from nil = "not yet looked up"); it is normalized back to nil on return.
local function get_by_code(code, allow_bad)
local lang
if data.lang_cache then
lang = data.lang_cache[code]
end
if lang == nil then
-- Not cached; do the real lookup. Pass `parse_err` so an invalid code throws, unless `allow_bad` is set.
lang = get_lang(code, not allow_bad and parse_err or nil, "allow etym",
data.allow_family)
if data.lang_cache then
data.lang_cache[code] = lang or false
end
end
return lang or nil
end
-- Now handle ordinary Wiktionary language-code prefixes (possibly several separated by + or , when
-- `allow_multiple` is given).
if data.allow_multiple then
local termlang_spec
termlang_spec, actual_term = term:match("^([a-zA-Z.,+-]+):([^ ].*)$")
if termlang_spec then
termlang = split(termlang_spec, "[,+]")
-- Only treat the prefix as language codes if every component looks like one.
local all_possible_code = true
for _, code in ipairs(termlang) do
if not is_possible_lang_code(code) then
all_possible_code = false
break
end
end
if all_possible_code then
-- get_by_code() can only return nil when `allow_bad` is set; in that case treat the whole prefix as part of
-- the term.
local saw_nil = false
for i, code in ipairs(termlang) do
termlang[i] = get_by_code(code, data.allow_bad)
if not termlang[i] then
saw_nil = true
end
end
if saw_nil then
termlang = nil
else
term = actual_term
end
else
termlang = nil
end
end
else
-- Single language code allowed; same logic as above but without splitting on , or +.
termlang, actual_term = term:match("^([a-zA-Z.-]+):([^ ].*)$")
if termlang then
if is_possible_lang_code(termlang) then
termlang = get_by_code(termlang, data.allow_bad)
if termlang then
term = actual_term
end
else
termlang = nil
end
end
end
local link, display = parse_bracketed_term(term, parse_err)
return term, termlang, link, display, false
end
--[==[
Parse a term that may have inline modifiers attached (e.g. {rifiuti<q:plural-only>} or
{rinfusa<t:bulk cargo><lit:resupplying><qq:more common in the plural {{m|it|rinfuse}}>}).
* `arg` is the term to parse.
* `props` is an object holding further properties controlling how to parse the term (only `param_mods` and
`generate_obj` are required):
** `paramname` is the name of the parameter where `arg` comes from, or nil if this isn't available (it is used only in
error messages).
** `param_mods` is a table describing the allowed inline modifiers (see below).
** `generate_obj` is a function of one or two arguments that should parse the argument minus the inline modifiers and
return a corresponding parsed object (into which the inline modifiers will be rewritten). If declared with one
argument, that will be the raw value to parse; if declared with two arguments, the second argument will be the
`parse_err` function (see below).
** `parse_err` is an optional function of one argument (an error message) and should display the error message, along
with any desired contextual text (e.g. the argument name and value that triggered the error). If omitted, a default
function will be generated which displays the error along with the original value of `arg` (passed through
{escape_wikicode()} above to ensure that Wikicode (such as links) is displayed literally).
** `splitchar` is a Lua pattern. If specified, `arg` can consist of multiple delimiter-separated terms, each of which
may be followed by inline modifiers, and the return value will be a list of parsed objects instead of a single
object. Note that splitting on delimiters will not happen in certain protected sequences (by default
comma+whitespace; see below). The algorithm to split on delimiters is sensitive to inline modifier syntax and will
not be confused by delimiters inside of inline modifiers, which do not trigger splitting (whether or not contained
within protected sequences).
** `outer_container`, if specified, is used when multiple delimiter-separated terms are possible, and is the object
into which the list of per-term objects is stored (into the `terms` field) and into which any modifiers that are
given the `overall` property (see below) will be stored. If given, this value will be returned as the value of
{parse_inline_modifiers()}. If `outer_container` is not given, {parse_inline_modifiers()} will return the list of
per-term objects directly, and no modifier may have an `overall` property.
** `preserve_splitchar`, if specified, causes the actual delimiter matched by `splitchar` to be returned in the
parsed object describing the element that comes after the delimiter. The delimiter is stored in a key whose
name is controlled by `delimiter_key`, which defaults to "delimiter".
** `delimiter_key` controls the key into which the actual delimiter is written when `preserve_splitchar` is used.
See above.
** `escape_fun` and `unescape_fun` are as in split_escaping() and split_alternating_runs_escaping() above and
control the protected sequences that won't be split. By default, `escape_comma_whitespace` and
`unescape_comma_whitespace` are used, so that comma+whitespace sequences won't be split. Set to `false` to disable
escaping/unescaping.
** `pre_normalize_modifiers`, if specified, is a function of one argument, which can be used to "normalize" modifiers
prior to further parsing. This is used, for example, in [[Module:tl-pronunciation]] to convert modifiers of the
form `<noun^expectation; hope>` to `<t:noun^expectation; hope>`, so they can be processed as standard modifiers. It
is also used in [[Module:ar-verb]] to convert footnotes of the form `[rare]` to `<footnote:[rare]>`, to allow for
mixing bracketed footnotes and inline modifiers when overriding verbal nouns and such. It could similarly be used to
handle boolean modifiers like `<slb>` in {{tl|desc}} and convert them to a standard form `<slb:1>`. It runs just
before parsing out the modifier prefix and value, and is passed an object containing fields `modtext` (the
un-normalized modifier text, including surrounding angle brackets, or in some cases, text surrounded by other
delimiters such as square brackets, if `parse_inline_modifiers_from_segments()` is being called and the caller did
their own parsing of balanced segment runs) and `parse_err` (the passed-in or autogenerated function to signal an
error during parsing; a function of one argument, a message, which throws an error displaying that message). It
should return a single value, the normalized value of `modtext`, including surrounding angle brackets.
`param_mods` is a table describing allowed modifiers. The keys of the table are modifier prefixes and the values are
tables describing how to parse and store the associated modifier values. Here is a typical example, for an item that
takes the standard modifiers associated with `full_link()` in [[Module:links]], as well as left and right qualifiers
and labels:
{
local param_mods = {
alt = {},
t = {
-- [[Module:links]] expects the gloss in "gloss".
item_dest = "gloss",
},
gloss = {},
tr = {},
ts = {},
g = {
-- [[Module:links]] expects the genders in "g". `sublist = true` automatically splits on comma (optionally
-- with surrounding whitespace).
item_dest = "genders",
sublist = true,
},
pos = {},
lit = {},
id = {},
sc = {
-- Automatically parse as a script code and convert to a script object.
type = "script",
},
-- Qualifiers and labels
q = {
type = "qualifier",
},
qq = {
type = "qualifier",
},
l = {
type = "labels",
},
ll = {
type = "labels",
},
}
}
In the table values:
* `item_dest` specifies the destination key to store the object into (if not the same as the modifier key itself).
* `type`, `set`, `sublist` and `convert` have the same meaning as in [[Module:parameters]] and are used for converting
the object from the string form given by the user into the form needed for further processing. Note that `type` makes
use of additional properties that may be specified. Specifically, if {type = "language"}, the properties `family` and
`method` are also examined, and if {type = "family"} or {type = "script"}, the property `method` is examined.
* `store` describes how to store the converted modifier value into the parsed object. If omitted, the converted value
is simply written into the parsed object under the appropriate key; but an error is generated if the key already has
a value. (This means that multiple occurrences of a given modifier are allowed if `store` is given, but not
otherwise.) `store` can be one of the following:
** {"insert"}: the converted value is appended to the key's value using {insert()}; if the key has no value, it
is first converted to an empty list;
** {"insertIfNot"}: is similar but appends the value using {insertIfNot()} in [[Module:table]];
** {"insert-flattened"}, the converted value is assumed to be a list and the objects are appended one-by-one into the
key's existing value using {insert()};
** {"insertIfNot-flattened"} is similar but appends using {insertIfNot()} in [[Module:table]]; (WARNING: When using
{"insert-flattened"} and {"insertIfNot-flattened"}, if there is no existing value for the key, the converted value is
just stored directly. This means that future appends will side-effect that value, so make sure that the return value
of the conversion function for this key generates a fresh list each time.)
** a function of one argument, an object with the following properties:
*** `dest`: the object to write the value into;
*** `key`: the field where the value should be written;
*** `converted`: the (converted) value to write;
*** `raw_val`: the raw, user-specified value (a string);
*** `parse_err`: a function of one argument (an error string), which signals an error, and includes extra context in
the message about the modifier in question, the angle-bracket spec that includes the modifier in it, the overall
value, and (if `paramname` was given) the parameter holding the overall value.
* `overall` only applies if `splitchar` is given. In this case, the modifier applies to the entire argument rather than
to an individual term in the argument, and must occur after the last item separated by `splitchar`, instead of being
allowed to occur after any of them. The modifier will be stored into the outer container object, which must exist
(i.e. `outer_container` must have been given).
The return value of {parse_inline_modifiers()} depends on whether `splitchar` and `outer_container` have been given. If
neither is given, the return value is the object returned by `generate_obj`. If `splitchar` but not `outer_container` is
given, the return value is a list of per-term objects, each of which is generated by `generate_obj`. If both `splitchar`
and `outer_container` are given, the return value is the value of `outer_container` and the per-term objects are stored
into the `terms` field of this object.
]==]
-- See the long doc comment above for the full contract of `arg` and `props`.
function export.parse_inline_modifiers(arg, props)
local segments
-- Re-join runs that are entirely [...]-delimited back into the neighboring raw text, so that bracketed links
-- aren't treated as modifier specs.
local function rejoin_bracket_delimited_runs(segments)
return export.rejoin_delimited_runs {
runs = segments,
delimiter_pattern = "^%[.*%]$",
}
end
local rejoin_square_brackets_after_split = false
-- The following is an optimization. If we see a square bracket (normally a double square bracket internal link
-- [[...]]), we want to not treat delimiter characters inside (either <...> balanced delimiters or separators such
-- as commas) as delimiters. But this requires a more sophisticated and slower algorithm, and most of the time it
-- isn't needed because there are no square brackets. So we check for a square bracket and fall back to a simpler
-- algorithm otherwise (which, since it involves only a single balanced delimiter, can use the built-in %b() Lua
-- pattern syntax, which AFAIK is implemented in C).
if arg:find("%[") then
segments = export.parse_multi_delimiter_balanced_segment_run(arg, {{"[", "]"}, {"<", ">"}})
if not props.splitchar then
segments = rejoin_bracket_delimited_runs(segments)
else
-- Splitting happens first; the rejoin is done per-group inside the loop below.
rejoin_square_brackets_after_split = true
end
else
segments = export.parse_balanced_segment_run(arg, "<", ">")
end
-- `overall` modifiers are stored into `props.outer_container`; reject any `overall` declaration when that
-- container was not supplied.
local function verify_no_overall()
for _, mod_props in pairs(props.param_mods) do
if mod_props.overall then
error("Internal caller error: Can't specify `overall` for a modifier in `param_mods` unless `outer_container` property is given")
end
end
end
if not props.splitchar then
-- Single-term case: parse the one group directly.
if props.outer_container then
error("Internal caller error: Can't specify `outer_container` property unless `splitchar` is given")
end
verify_no_overall()
return export.parse_inline_modifiers_from_segments {
group = segments,
group_index = nil,
separated_groups = nil,
arg = arg,
props = props,
}
else
-- Multi-term case: split on `splitchar` and parse each term group.
local terms = {}
if props.outer_container then
props.outer_container.terms = terms
else
verify_no_overall()
end
-- Note the explicit `== nil` checks: a caller may pass `false` to disable escaping/unescaping entirely, which
-- must be distinguished from "not specified" (default to comma+whitespace protection).
local escape_fun = props.escape_fun
if escape_fun == nil then
escape_fun = export.escape_comma_whitespace
end
local unescape_fun = props.unescape_fun
if unescape_fun == nil then
unescape_fun = export.unescape_comma_whitespace
end
local separated_groups = export.split_alternating_runs_escaping(segments, props.splitchar,
props.preserve_splitchar, escape_fun, unescape_fun)
-- With `preserve_splitchar`, odd indices hold term groups and even indices hold the delimiter runs; hence
-- the step of 2 in that case.
for j = 1, #separated_groups, (props.preserve_splitchar and 2 or 1) do
if rejoin_square_brackets_after_split then
separated_groups[j] = rejoin_bracket_delimited_runs(separated_groups[j])
end
local parsed = export.parse_inline_modifiers_from_segments {
group = separated_groups[j],
group_index = j,
separated_groups = separated_groups,
arg = arg,
props = props,
}
if props.preserve_splitchar and j > 1 then
-- Stash the delimiter that preceded this term; it lives in the delimiter run just before this group.
parsed[props.delimiter_key or "delimiter"] = separated_groups[j - 1][1]
end
insert(terms, parsed)
end
if props.outer_container then
return props.outer_container
else
return terms
end
end
end
--[==[
Parse a single term that may have inline modifiers attached. This is a helper function of {parse_inline_modifiers()} but
is exported separately in case the caller needs to make their own call to {parse_balanced_segment_run()} (as in
[[Module:quote]], which splits on several matched delimiters simultaneously). It takes only a single argument, `data`,
which is an object with the following fields:
* `group`: A list of segments as output by {parse_balanced_segment_run()} (see the overall comment at the top of
[[Module:parse utilities]]), or one of the lists returned by calling {split_alternating_runs()}.
* `separated_groups`: The list of groups (each of which is of the form of `group`) describing all the terms in the
argument parsed by {parse_inline_modifiers()}, or {nil} if this isn't applicable (i.e. multiple terms aren't allowed
in the argument). Currently used only the check the number of groups in the list against `group_index`.
* `group_index`: The index into `separated_groups` where `group` can be found, or {nil} if not applicable (see below).
* `arg`: The original user-specified argument being parsed; used only for error messages and only when `props.parse_err`
is not specified.
* `props`: The `props` argument to {parse_inline_modifiers()}.
The return value is the object created by `generate_obj`, with properties filled in describing the modifiers of the
term in question. Note that `props.outer_container` and the `overall` setting of the `props.param_mods` structure are
respected, but `props.splitchar` is ignored because the splitting happens in the caller. Specifically, if there are any
modifiers with the `overall` setting, `props.separated_groups` and `props.group_index` must be given so that the
function is able to determine if the modifier is indeed attached to the last term, and `props.outer_container` must be
given because that is where such modifiers are stored. Otherwise, none of these settings need be given.
]==]
function export.parse_inline_modifiers_from_segments(data)
	local props = data.props
	local group = data.group

	-- Collect all non-deprecated modifier prefixes, sorted alphabetically,
	-- for use in "should be one of ..." error messages.
	local function get_valid_prefixes()
		local valid_prefixes = {}
		for param_mod, mod_props in pairs(props.param_mods) do
			if not mod_props.deprecated then
				insert(valid_prefixes, param_mod)
			end
		end
		sort(valid_prefixes)
		return valid_prefixes
	end

	-- Describe the argument being parsed, for error messages: "NAME=VALUE"
	-- when the caller supplied `props.paramname`, otherwise just the value.
	local function get_arg_gloss()
		if props.paramname then
			return ("%s=%s"):format(props.paramname, data.arg)
		else
			return data.arg
		end
	end

	local parse_err = props.parse_err or export.make_parse_err(get_arg_gloss())
	-- The first segment of the group is the term itself; `generate_obj`
	-- converts it into the object whose fields the modifiers below fill in.
	local term_obj = props.generate_obj(group[1], parse_err)
	-- Even-indexed segments (2, 4, ...) are the "<...>" modifiers; the text
	-- segments between consecutive modifiers must be empty.
	for k = 2, #group - 1, 2 do
		if group[k + 1] ~= "" then
			parse_err("Extraneous text '" .. group[k + 1] .. "' after modifier")
		end
		local group_k = group[k]
		if props.pre_normalize_modifiers then
			-- FIXME: For some use cases, we might have to pass more information.
			group_k = props.pre_normalize_modifiers {
				modtext = group_k,
				parse_err = parse_err
			}
		end
		local modtext = group_k:match("^<(.*)>$")
		if not modtext then
			parse_err("Internal error: Modifier '" .. group_k .. "' isn't surrounded by angle brackets")
		end
		-- Split "<prefix:value>" into its prefix and value.
		local prefix, val = modtext:match("^([a-zA-Z0-9+_-]+):(.*)$")
		if not prefix then
			local valid_prefixes = get_valid_prefixes()
			for i, valid_prefix in ipairs(valid_prefixes) do
				valid_prefixes[i] = "'" .. valid_prefix .. ":'"
			end
			parse_err(("Modifier %s%s lacks a prefix, should begin with one of %s"):format(
				group_k, group_k ~= group[k] and (" (normalized from %s)"):format(group[k]) or "",
				list_to_text(valid_prefixes)))
		end
		-- Error function that additionally identifies the offending modifier
		-- prefix (and, when no caller-supplied parse_err exists, the argument).
		local prefix_parse_err
		if props.parse_err then
			prefix_parse_err = function(msg, stack_frames_to_ignore)
				props.parse_err(("%s: modifier prefix '%s' in %s"):format(msg, prefix, group[k]),
					stack_frames_to_ignore)
			end
		else
			prefix_parse_err = export.make_parse_err(("modifier prefix '%s' in %s in %s"):format(
				prefix, group[k], get_arg_gloss()))
		end
		if props.param_mods[prefix] then
			local mod_props = props.param_mods[prefix]
			-- Reject prefixes that have been removed or renamed.
			if mod_props.replaced_by == false then
				prefix_parse_err(
					("Prefix has been removed and is no longer valid%s%s"):format(
						mod_props.reason and ", " .. mod_props.reason or "",
						mod_props.instead and "; instead, " .. mod_props.instead or "")
				)
			elseif mod_props.replaced_by then
				prefix_parse_err(
					("Prefix has been replaced by '%s'%s"):format(
						mod_props.replaced_by, mod_props.reason and ", " .. mod_props.reason or "")
				)
			end
			local key = mod_props.item_dest or prefix
			-- `overall` modifiers are stored on the outer container and must
			-- be attached to the last term; all others go on the term object.
			local dest
			if mod_props.overall then
				if not data.separated_groups then
					prefix_parse_err("Internal error: `data.separated_groups` not given when `overall` is seen")
				end
				if not props.outer_container then
					-- This should have been caught earlier during validation in parse_inline_modifiers().
					prefix_parse_err("Internal error: `props.outer_container` not given when `overall` is seen")
				end
				if data.group_index ~= #data.separated_groups then
					prefix_parse_err("Prefix should occur after the last comma-separated term")
				end
				dest = props.outer_container
			else
				dest = term_obj
			end
			local converted = val
			if mod_props.type or mod_props.set or mod_props.sublist or mod_props.convert then
				-- WARNING: Here as an optimization we embed some knowledge of convert_val() in [[Module:parameters]],
				-- specifically that if none of `type`, `set`, `sublist` and `convert` are set, the conversion is an
				-- identity operation and can be skipped. (convert_val() also makes use of the fields `method` and
				-- `family`, but only if `type` is set to certain values such as "language", "family" or "script", and
				-- makes use of the field `required`, but only if `set` is set.) If this becomes problematic, consider
				-- removing the optimization.
				converted = convert_val(converted, prefix_parse_err, mod_props)
			end
			-- Store the converted value according to the prefix's `store`
			-- property: set-once (default), list insertion (with or without
			-- duplicates), flattened list insertion, or a caller-supplied
			-- storage function.
			local store = props.param_mods[prefix].store
			if not store then
				if dest[key] then
					prefix_parse_err("Prefix occurs twice")
				end
				dest[key] = converted
			elseif store == "insert" then
				if not dest[key] then
					dest[key] = {converted}
				else
					insert(dest[key], converted)
				end
			elseif store == "insertIfNot" then
				if not dest[key] then
					dest[key] = {converted}
				else
					insert_if_not(dest[key], converted)
				end
			elseif store == "insert-flattened" then
				-- Here `converted` is itself a list; append its elements.
				if not dest[key] then
					dest[key] = converted
				else
					for _, obj in ipairs(converted) do
						insert(dest[key], obj)
					end
				end
			elseif store == "insertIfNot-flattened" then
				if not dest[key] then
					dest[key] = converted
				else
					for _, obj in ipairs(converted) do
						insert_if_not(dest[key], obj)
					end
				end
			elseif type(store) == "string" then
				prefix_parse_err(("Internal caller error: Unrecognized value '%s' for `store` property"):format(store))
			elseif not is_callable(store) then
				prefix_parse_err(("Internal caller error: Unrecognized type for `store` property %s"):format(dump(store)))
			else
				store{
					dest = dest,
					key = key,
					converted = converted,
					raw = val,
					parse_err = prefix_parse_err
				}
			end
		else
			local valid_prefixes = get_valid_prefixes()
			for i, valid_prefix in ipairs(valid_prefixes) do
				valid_prefixes[i] = "'" .. valid_prefix .. "'"
			end
			prefix_parse_err("Unrecognized prefix, should be one of " ..
				list_to_text(valid_prefixes))
		end
	end
	return term_obj
end
return export
rpyhwrvd7abocvc6jbx3039wwqhfnqz
نذير
0
72486
507779
335044
2026-04-13T19:32:28Z
Redmin
6857
507779
wikitext
text/x-wiki
=={{ভাষা|ar}}==
===ব্যুৎপত্তি===
From the root {{ar-root|ن ذ ر}}.
===উচ্চারণ===
* {{ar-IPA|نَذِير}}
===বিশেষ্য===
{{ar-noun|نَذِير|m|pl=نُذُر}}
# [[সতর্ককারী]]; যে [[দুঃসংবাদ]] নিয়ে আসে
#: {{syn|ar|مُنْذِر}}
#: {{ant|ar|بَشِير|مُبَشِّر}}
#* {{RQ:Qur'an|33|45|passage=يَا أَيُّهَا النَّبِيُّ إِنَّا أَرْسَلْنَاكَ شَاهِدًا وَمُبَشِّرًا وَ'''نَذِيرًا'''|t=O Prophet, indeed We have sent you as a witness and a bringer of good tidings and a warner.}}
#* {{RQ:Qur'an|53|56|passage=هَٰذَا نَذِيرٌ مِنَ النُّذُرِ الْأُولَى|t=This is a warner of the warners of old.}}
# {{lb|ar|by extension}} আল্লাহর নবী
# [[সতর্কতা]]
#: {{uxi|ar|نَذِيرُ شُؤْمٍ|An ill omen}}
#* {{RQ:Qur'an|54|5|passage=حِكْمَةٌ بَالِغَةٌ فَمَا تُغْنِ النُّذُرُ|t=Extensive wisdom - but warning does not avail [them].}}
#* {{RQ:Qur'an|74|35-36|passage=إِنَّهَا لَإِحْدَى الْكُبَرِ / نَذِيرًا لِلْبَشَرِ|t=Indeed, it [i.e., the Fire] is of the greatest [afflictions]. As a warning to humanity.}}
====Declension====
{{ar-decl-noun|نَذِير|pl=نُذُر}}
===নামবাচক বিশেষ্য===
{{ar-proper noun|نَذِير|m}}
# {{প্রদত্ত নাম|ar|পুরুষ}}
====Declension====
{{ar-decl-noun|نَذِير:di|state=ind-def}}
siby0t5ybkhoaey6e8un3753k2repsc
টেমপ্লেট:archaic spelling of
10
123858
507809
251563
2026-04-14T11:12:02Z
Redmin
6857
507809
wikitext
text/x-wiki
<includeonly>{{ {{#if:{{{ভাষা|}}}|check deprecated lang param usage|no deprecated lang param usage}}|lang={{{ভাষা|}}}|<!--
-->{{#invoke:form of/templates|form_of_t|[[Appendix:Glossary#archaic|archaic]] spelling of|cat=archaic forms|withcap=1|withdot=1}}<!--
-->}}
</includeonly>
r73ix4ym297ujvrc0e3iymsn6sy48gl
মডিউল:pages
828
156135
507795
324208
2026-04-14T06:59:12Z
Redmin
6857
[[en:Module:pages|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507795
Scribunto
text/plain
local export = {}
local string_utilities_module = "Module:string utilities"
local concat = table.concat
local find = string.find
local format = string.format
local getmetatable = getmetatable
local get_current_section -- defined below
local get_namespace_shortcut -- defined below
local get_pagetype -- defined below
local gsub = string.gsub
local insert = table.insert
local is_internal_title -- defined below
local is_title -- defined below
local lower = string.lower
local match = string.match
local new_title = mw.title.new
local require = require
local sub = string.sub
local title_equals = mw.title.equals
local tonumber = tonumber
local type = type
local ufind = mw.ustring.find
local unstrip_nowiki = mw.text.unstripNoWiki
--[==[
Loaders for functions in other modules, which overwrite themselves with the target function when called. This ensures modules are only loaded when needed, retains the speed/convenience of locally-declared pre-loaded functions, and has no overhead after the first call, since the target functions are called directly in any subsequent calls.]==]
local function decode_entities(...)
	local fn = require(string_utilities_module).decode_entities
	decode_entities = fn
	return fn(...)
end
local function ulower(...)
	local fn = require(string_utilities_module).lower
	ulower = fn
	return fn(...)
end
local function trim(...)
	local fn = require(string_utilities_module).trim
	trim = fn
	return fn(...)
end
--[==[
Loaders for objects, which load data (or some other object) into some variable, which can then be accessed as "foo or get_foo()", where the function get_foo sets the object to "foo" and then returns it. This ensures they are only loaded when needed, and avoids the need to check for the existence of the object each time, since once "foo" has been set, "get_foo" will not be called again.]==]
-- Lazily-initialized current frame. The loader nils itself after the first
-- call; callers use the idiom `current_frame or get_current_frame()`.
local current_frame
local function get_current_frame()
	current_frame = mw.getCurrentFrame()
	get_current_frame = nil
	return current_frame
end
-- Lazily-initialized parent frame (may be nil when there is no parent).
local parent_frame
local function get_parent_frame()
	parent_frame = (current_frame or get_current_frame()):getParent()
	get_parent_frame = nil
	return parent_frame
end
-- Lazily-initialized map from namespace number to its conventional shortcut.
local namespace_shortcuts
local function get_namespace_shortcuts()
	local shortcuts = {
		[4] = "WT",
		[10] = "T",
		[14] = "CAT",
		[100] = "AP",
		[110] = "WS",
		[118] = "RC",
		[828] = "MOD",
	}
	namespace_shortcuts = shortcuts
	get_namespace_shortcuts = nil
	return shortcuts
end
do
	-- Cached result: nil = not yet computed; false = not transcluded.
	local transcluded
	--[==[
	Returns {true} if the current {{tl|#invoke:}} is being transcluded, or {false} if not. If the current {{tl|#invoke:}} is part of a template, for instance, this template will therefore return {true}.
	Note that if a template containing an {{tl|#invoke:}} is used on its own page (e.g. to display a demonstration), this function is still able to detect that this is transclusion. This is an improvement over the other method for detecting transclusion, which is to check the parent frame title against the current page title, which fails to detect transclusion in that instance.]==]
	function export.is_transcluded()
		if transcluded == nil then
			-- <includeonly> content only renders when transcluded, so preprocessing
			-- it in the parent frame reveals whether transclusion is happening.
			transcluded = (parent_frame or get_parent_frame()) and parent_frame:preprocess("<includeonly>1</includeonly>") == "1" or false
		end
		return transcluded
	end
end
do
	-- Cached result: nil = not yet computed.
	local preview
	--[==[
	Returns {true} if the page is currently being viewed in preview, or {false} if not.]==]
	function export.is_preview()
		if preview == nil then
			-- {{REVISIONID}} expands to the empty string while previewing.
			preview = (current_frame or get_current_frame()):preprocess("{{REVISIONID}}") == ""
		end
		return preview
	end
end
--[==[
Returns {true} if the input is a title object, or {false} if not. This therefore '''includes''' external title objects (i.e. those for pages on other wikis), such as [[w:Example]], unlike `is_internal_title` below.]==]
function export.is_title(val)
	if type(val) ~= "table" then
		return false
	end
	-- There's no foolproof method for checking for a title object, but the
	-- __eq metamethod should be mw.title.equals unless the object has been
	-- seriously messed around with.
	local mt = getmetatable(val)
	if type(mt) ~= "table" or getmetatable(mt) ~= nil then
		return false
	end
	return mt.__eq == title_equals
end
is_title = export.is_title
--[==[
Returns {true} if the input is an internal title object, or {false} if not. An internal title object is a title object for a page on this wiki, such as [[example]]. This therefore '''excludes''' external title objects (i.e. those for pages on other wikis), such as [[w:Example]], unlike `is_title` above.]==]
function export.is_internal_title(title)
	-- Note: Mainspace titles starting with "#" should be invalid, but a bug in
	-- mw.title.new and mw.title.makeTitle means a title object is returned that
	-- has the empty string for prefixedText, so they need to be filtered out.
	if not is_title(title) then
		return false
	end
	return title.prefixedText ~= "" and title.interwiki == ""
end
is_internal_title = export.is_internal_title
--[==[
Returns {true} if the input string is a valid link target, or {false} if not. This therefore '''includes''' link targets to other wikis, such as [[w:Example]], unlike `is_valid_page_name` below.]==]
function export.is_valid_link_target(target)
	-- Reject non-strings with an error at the caller's position.
	if type(target) ~= "string" then
		error(format("bad argument #1 to 'is_valid_link_target' (string expected, got %s)", type(target)), 2)
	end
	-- Valid iff mw.title.new can produce a title object from it.
	return is_title(new_title(target))
end
--[==[
Returns {true} if the input string is a valid page name on this wiki, or {false} if not. This therefore '''excludes''' page names on other wikis, such as [[w:Example]], unlike `is_valid_link_target` above.]==]
function export.is_valid_page_name(name)
	-- Reject non-strings with an error at the caller's position.
	if type(name) ~= "string" then
		error(format("bad argument #1 to 'is_valid_page_name' (string expected, got %s)", type(name)), 2)
	end
	-- Valid iff the resulting title object refers to a page on this wiki.
	return is_internal_title(new_title(name))
end
--[==[
Given a title object, returns a full link target which will always unambiguously link to it.
For instance, the input {"foo"} (for the page [[foo]]) returns {":foo"}, as a leading colon always refers to mainspace, even when other namespaces might be assumed (e.g. when transcluding using `{{ }}` syntax).
If `shortcut` is set, then the returned target will use the namespace shortcut, if any; for example, the title for `Template:foo` would return {"T:foo"} instead of {"Template:foo"}.]==]
function export.get_link_target(title, shortcut)
	if not is_title(title) then
		-- Fixed: the message previously named 'is_valid_link_target' (copy-paste
		-- error); also report at the caller's position like the other validators.
		error(format("bad argument #1 to 'get_link_target' (title object expected, got %s)", type(title)), 2)
	elseif title.interwiki ~= "" then
		-- Interwiki targets are already unambiguous.
		return title.fullText
	elseif shortcut then
		-- Use the namespace shortcut (e.g. "T:" for "Template:"), preserving
		-- any fragment.
		local fragment = title.fragment
		if fragment == "" then
			return get_namespace_shortcut(title) .. ":" .. title.text
		end
		return get_namespace_shortcut(title) .. ":" .. title.text .. "#" .. fragment
	elseif title.namespace == 0 then
		-- A leading colon always refers to mainspace.
		return ":" .. title.fullText
	end
	return title.fullText
end
do
	-- Returns a match position (truthy) if `text` looks like a sandbox title:
	-- either a title with a "User:" pseudo-prefix or any title containing
	-- "sandbox" (case-insensitively); nil otherwise.
	local function find_sandbox(text)
		return find(text, "^User:.") or find(lower(text), "sandbox", 1, true)
	end
	-- Builds a space-separated pagetype string for a transcludable page
	-- (template/module): `main_type` plus any applicable "sandbox", "testcase"
	-- and "documentation" subtypes, with a trailing "page" where appropriate.
	-- `page_suffix` is falsy in all current callers; when falsy, a bare
	-- `main_type` result gets no "page" suffix.
	local function get_transclusion_subtypes(title, main_type, documentation, page_suffix)
		local text, subtypes = title.text, {main_type}
		-- Any template/module with "sandbox" in the title. These are impossible
		-- to screen for more accurately, as there's no consistent pattern. Also
		-- any user sandboxes in the form (e.g.) "Template:User:...".
		local sandbox = find_sandbox(text)
		if sandbox then
			insert(subtypes, "sandbox")
		end
		-- Any template/module testcases (which can be labelled and/or followed
		-- by further subpages).
		local testcase = find(text, "./[Tt]estcases?%f[%L]")
		if testcase then
			-- Order "testcase" and "sandbox" based on where the patterns occur
			-- in the title.
			local n = sandbox and sandbox < testcase and 3 or 2
			insert(subtypes, n, "testcase")
		end
		-- Any template/module documentation pages.
		if documentation then
			insert(subtypes, "documentation")
		end
		local final = subtypes[#subtypes]
		if not (final == main_type and not page_suffix or final == "sandbox") then
			insert(subtypes, "page")
		end
		return concat(subtypes, " ")
	end
	-- Like get_transclusion_subtypes(), but prepends "user " (User: namespace)
	-- or "gadget " (MediaWiki: namespace, "Gadget-" prefix) to `main_type`.
	local function get_snippet_subtypes(title, main_type, documentation)
		local ns = title.namespace
		return get_transclusion_subtypes(title, (
			ns == 2 and "user " or
			ns == 8 and match(title.text, "^Gadget-.") and "gadget " or
			""
		) .. main_type, documentation)
	end
--[==[
Returns the page type of the input title object in a format which can be used in running text.]==]
function export.get_pagetype(title)
if not is_internal_title(title) then
error(mw.dumpObject(title.fullText) .. " is not a valid page name.")
end
-- If possibly a documentation page, get the base title and set the
-- `documentation` flag.
local content_model, text, documentation = title.contentModel
if content_model == "wikitext" then
text = title.text
if title.isSubpage and title.subpageText == "documentation" then
local base_title = title.basePageTitle
if base_title then
title, content_model, text, documentation = base_title, base_title.contentModel, base_title.text, true
end
end
end
-- Content models have overriding priority, as they can appear in
-- nonstandard places due to page content model changes.
if content_model == "css" or content_model == "sanitized-css" then
return get_snippet_subtypes(title, "stylesheet", documentation)
elseif content_model == "javascript" then
return get_snippet_subtypes(title, "script", documentation)
elseif content_model == "json" then
return get_snippet_subtypes(title, "JSON data", documentation)
elseif content_model == "MassMessageListContent" then
return get_snippet_subtypes(title, "mass message delivery list", documentation)
-- Modules.
elseif content_model == "Scribunto" then
return get_transclusion_subtypes(title, "module", documentation, false)
elseif content_model == "text" then
return "page" -- ???
-- Otherwise, the content model is "wikitext", so check namespaces.
elseif title.isTalkPage then
return "talk page"
end
local ns = title.namespace
-- Main namespace.
if ns == 0 then
return "entry"
-- Wiktionary:
elseif ns == 4 then
return find_sandbox(title.text) and "sandbox" or "project page"
-- Template:
elseif ns == 10 then
return get_transclusion_subtypes(title, "template", documentation, false)
end
-- Convert the namespace to lowercase, unless it contains a capital
-- letter after the initial letter (e.g. MediaWiki, TimedText). Also
-- normalize any underscores.
local ns_text = gsub(title.nsText, "_", " ")
if ufind(ns_text, "^%U*$", 2) then
ns_text = ulower(ns_text)
end
-- User:
if ns == 2 then
return ns_text .. " " .. (title.isSubpage and "subpage" or "page")
-- Category: and Appendix:
elseif ns == 14 or ns == 100 then
return ns_text
-- Thesaurus: and Reconstruction:
elseif ns == 110 or ns == 118 then
return ns_text .. " entry"
end
return ns_text .. " page"
end
get_pagetype = export.get_pagetype
end
--[==[
Returns {true} if the input title object is for a content page, or {false} if not. A content page is a page that is considered part of the dictionary itself, and excludes pages for discussion, administration, maintenance etc.]==]
function export.is_content_page(title)
	if not is_internal_title(title) then
		error(mw.dumpObject(title.fullText) .. " is not a valid page name.")
	end
	if title.contentModel ~= "wikitext" then
		return false
	end
	-- (main), Appendix, Thesaurus, Citations, Reconstruction.
	local ns = title.namespace
	return ns == 0 or ns == 100 or ns == 110 or ns == 114 or ns == 118
end
--[==[
Returns {true} if the input title object is for a documentation page, or {false} if not.]==]
function export.is_documentation(title)
	-- A documentation page iff "documentation" appears as a whole word in
	-- the pagetype string.
	if match(get_pagetype(title), "%f[%w]documentation%f[%W]") then
		return true
	end
	return false
end
--[==[
Returns {true} if the input title object is for a sandbox, or {false} if not.
By default, sandbox documentation pages are excluded, but this can be overridden with the `include_documentation` parameter.]==]
function export.is_sandbox(title, include_documentation)
	local pagetype = get_pagetype(title)
	-- Not a sandbox at all?
	if not match(pagetype, "%f[%w]sandbox%f[%W]") then
		return false
	end
	-- Sandbox documentation pages are excluded unless requested.
	if include_documentation then
		return true
	end
	return not match(pagetype, "%f[%w]documentation%f[%W]")
end
--[==[
Returns {true} if the input title object is for a testcase page, or {false} if not.
By default, testcase documentation pages are excluded, but this can be overridden with the `include_documentation` parameter.]==]
function export.is_testcase_page(title, include_documentation)
	local pagetype = get_pagetype(title)
	-- Not a testcase page at all?
	if not match(pagetype, "%f[%w]testcase%f[%W]") then
		return false
	end
	-- Testcase documentation pages are excluded unless requested.
	if include_documentation then
		return true
	end
	return not match(pagetype, "%f[%w]documentation%f[%W]")
end
--[==[
Returns the namespace shortcut for the input title object, or else the namespace text. For example, a `Template:` title returns {"T"}, a `Module:` title returns {"MOD"}, and a `User:` title returns {"User"}.]==]
function export.get_namespace_shortcut(title)
	-- Fall back to the namespace text when no shortcut is defined.
	local shortcuts = namespace_shortcuts or get_namespace_shortcuts()
	return shortcuts[title.namespace] or title.nsText
end
get_namespace_shortcut = export.get_namespace_shortcut
do
local function check_level(lvl)
if type(lvl) ~= "number" then
error("Heading levels must be numbers.")
elseif lvl < 1 or lvl > 6 or lvl % 1 ~= 0 then
error("Heading levels must be integers between 1 and 6.")
end
return lvl
end
	--[==[
	A helper function which iterates over the headings in `text`, which should be the content of a page or (main) section.
	Each iteration returns three values: `sec` (the section title), `lvl` (the section level) and `loc` (the index of the section in the given text, from the first equals sign). The section title will be automatically trimmed, and any HTML entities will be resolved.
	The optional parameters `a` and `b` (integers between 1 and 6) restrict iteration to headings whose level lies in the range `a` to `b`; if only `a` is given, only headings of exactly that level are returned.]==]
	local function find_headings(text, a, b)
		a = a and check_level(a) or nil
		b = b and check_level(b) or a or nil
		local start, loc, lvl, sec = 1
		return function()
			repeat
				-- Match a heading line: 1-6 equals signs, the title, the same
				-- run of equals signs (%2), optional trailing blanks, at a line
				-- boundary. Skips headings outside the requested level range.
				loc, lvl, sec, start = match(text, "()%f[^%z\n](==?=?=?=?=?)([^\n]+)%2[\t ]*%f[%z\n]()", start)
				lvl = lvl and #lvl
			until not (sec and a) or (lvl >= a and lvl <= b)
			return sec and trim(decode_entities(sec)) or nil, lvl, loc
		end
	end
	-- Find the content of the section titled `name` (optionally required to be
	-- at heading level `level`) within `content`, including all subsections.
	-- Returns nil if `content` or `name` is missing or no match is found.
	local function _get_section(content, name, level)
		if not (content and name) then
			return nil
		elseif find(name, "\n", 1, true) then
			error("Heading name cannot contain a newline.")
		end
		level = level and check_level(level) or nil
		name = trim(decode_entities(name))
		local start
		for sec, lvl, loc in find_headings(content, level and 1 or nil, level) do
			-- Once the target heading has been found (`start` is set), the
			-- section ends at the next heading of the same or higher level.
			if start and lvl <= level then
				return sub(content, start, loc - 1)
			elseif not start and (not level or lvl == level) and sec == name then
				-- Note: `level` is reused here to remember the level of the
				-- section that was found.
				start, level = loc, lvl
			end
		end
		return start and sub(content, start)
	end
--[==[
A helper function to return the content of a page section.
`content` is raw wikitext, `name` is the requested section, and `level` is an optional parameter that specifies
the required section heading level. If `level` is not supplied, then the first section called `name` is returned.
`name` can either be a string or table of section names. If a table, each name represents a section that has the
next as a subsection. For example, { {"Spanish", "Noun"}} will return the first matching section called "Noun"
under a section called "Spanish". These do not have to be at adjacent levels ("Noun" might be L4, while "Spanish"
is L2). If `level` is given, it refers to the last name in the table (i.e. the name of the section to be returned).
The returned section includes all of its subsections. If no matching section is found, return {nil}.]==]
function export.get_section(content, names, level)
if type(names) ~= "table" then
return _get_section(content, names, level)
end
local i = 1
local name = names[i]
if not name then
error("Must specify at least 1 section.")
end
while true do
local nxt_i = i + 1
local nxt = names[nxt_i]
if nxt == nil then
return _get_section(content, name, level)
end
content = _get_section(content, name)
if content == nil then
return nil
elseif i == 6 then
error("Not possible specify more than 6 sections: headings only go up to level 6.")
end
i = nxt_i
name = names[i]
end
return content
end
end
--[==[
Convert a physical pagename (or the current pagename, if {nil} is passed in) to its logical equivalent, but only if
the physical pagename refers to one of the splits of a mammoth page such as [[a]]. Otherwise this simply returns the
subpage (the part after the last slash in namespace other than the main one, otherwise the same as passed in), unless
`include_base` is specified, in which case the return value will be the whole pagename minus the namespace, including
the base (the part before the final slash).
FIXME: This should be augmented with logic to handle unsupported titles, which is currently in process_page() in
[[Module:headword/page]], so that it is a general physical-to-logical conversion function.
Examples:
* {physical_to_logical_pagename_if_mammoth("a")} → {"a"}
* {physical_to_logical_pagename_if_mammoth("a/languages A to L")} → {"a"} (since [[a]] is marked as a mammoth page in [[Module:links/data]])
* {physical_to_logical_pagename_if_mammoth("50/50")} → {"50/50"} (since [[50/50]] is in the main namespace)
* {physical_to_logical_pagename_if_mammoth("Appendix:Lojban/a")} → "a"
* {physical_to_logical_pagename_if_mammoth("Appendix:Lojban/a", true)} → "Lojban/a"
* {physical_to_logical_pagename_if_mammoth("Reconstruction:Proto-Slavic/a")} → "a"
* {physical_to_logical_pagename_if_mammoth("Reconstruction:Proto-Slavic/a", true)} → "Proto-Slavic/a"
* {physical_to_logical_pagename_if_mammoth("User:Example/sandbox/foo")} → "foo"
* {physical_to_logical_pagename_if_mammoth("User:Example/sandbox/foo", true)} → "Example/sandbox/foo"
]==]
function export.physical_to_logical_pagename_if_mammoth(title, include_base)
	-- Accept nil (meaning the current page), a title object, or a pagename
	-- string (converted via the module's localized `new_title` alias, for
	-- consistency with the rest of this module).
	if title == nil then
		title = mw.title.getCurrentTitle()
	elseif not is_title(title) then
		title = new_title(title)
	end
	if title.nsText == "" then
		-- Formerly we checked for the specific known subpages of a given mammoth split page, e.g. we would convert
		-- [[a/languages M to Z]] to [[a]] assuming that [[a/languages M to Z]] was one of the splits, but not
		-- [[a/languages N to Z]]. To simplify this, we just convert anything with the right mammoth split page format
		-- on the assumption that it's unlikely we will ever have a legitimate non-mammoth-split pagename of this sort.
		local pagename = title.text
		local mammoth_root_page = pagename:match("^(.*)/languages [A-Z] to [A-Z]$")
		if mammoth_root_page then
			pagename = mammoth_root_page
		end
		return pagename
	elseif include_base then
		-- Whole pagename minus the namespace, including the base.
		return title.text
	else
		-- Just the part after the final slash.
		return title.subpageText
	end
end
--[==[
Obsolete name for `physical_to_logical_pagename_if_mammoth`. FIXME: Replace all uses and remove.
]==]
function export.safe_page_name(title)
	-- Thin delegation to the new name, kept for backward compatibility.
	local convert = export.physical_to_logical_pagename_if_mammoth
	return convert(title)
end
do
	-- Cached section number for the current #invoke.
	local current_section
	--[==[
	A function which returns the number of the page section which contains the current {#invoke}.]==]
	function export.get_current_section()
		if current_section ~= nil then
			return current_section
		end
		local extension_tag = (current_frame or get_current_frame()).extensionTag
		-- We determine the section via the heading strip marker count, since they're numbered sequentially, but the only way to do this is to generate a fake heading via frame:preprocess(). The native parser assigns each heading a unique marker, but frame:preprocess() will return copies of older markers if the heading is identical to one further up the page, so the fake heading has to be unique to the page. The best way to do this is to feed it a heading containing a nowiki marker (which we will need later), since those are always unique.
		local nowiki_marker = extension_tag(current_frame, "nowiki")
		-- Note: heading strip markers have a different syntax to the ones used for tags.
		local h = tonumber(match(
			current_frame:preprocess("=" .. nowiki_marker .. "="),
			"\127'\"`UNIQ%-%-h%-(%d+)%-%-QINU`\"'\127"
		))
		-- For some reason, [[Special:ExpandTemplates]] doesn't generate a heading strip marker, so if that happens we simply abort early.
		if not h then
			return 0
		end
		-- The only way to get the section number is to increment the heading count, so we store the offset in nowiki strip markers which can be retrieved by procedurally unstripping nowiki markers, counting backwards until we find a match.
		local n, offset = tonumber(match(
			nowiki_marker,
			"\127'\"`UNIQ%-%-nowiki%-([%dA-F]+)%-QINU`\"'\127"
		), 16)
		while not offset and n > 0 do
			n = n - 1
			offset = match(
				unstrip_nowiki(format("\127'\"`UNIQ--nowiki-%08X-QINU`\"'\127", n)),
				"^HEADING\1(%d+)" -- Prefix "HEADING\1" prevents collisions.
			)
		end
		offset = offset and (offset + 1) or 0
		-- Record the new offset for later invocations, then cache the result.
		extension_tag(current_frame, "nowiki", "HEADING\1" .. offset)
		current_section = h - offset
		return current_section
	end
	get_current_section = export.get_current_section
end
do
	local L2_sections, current_L2
	-- Lazy loader for the page's section-number → L2-name map from
	-- [[Module:headword/data]].
	local function get_L2_sections()
		L2_sections, get_L2_sections = mw.loadData("Module:headword/data").page.L2_sections, nil
		return L2_sections
	end
	--[==[
	A function which returns the name of the L2 language section which contains the current {#invoke}.]==]
	function export.get_current_L2()
		if current_L2 ~= nil then
			return current_L2 or nil -- Return nil if current_L2 is false (i.e. there's no L2).
		end
		local section = get_current_section()
		-- Walk backwards from the current section until an L2 is found.
		while section > 0 do
			local L2 = (L2_sections or get_L2_sections())[section]
			if L2 then
				current_L2 = L2
				return L2
			end
			section = section - 1
		end
		-- Cache "no L2" as false, since nil means "not yet computed".
		current_L2 = false
		return nil
	end
end
return export
dflgelqwqn49uq9wx18x5vggpgn6t4j
মডিউল:anchors
828
156157
507794
324230
2026-04-14T06:52:04Z
Redmin
6857
[[en:Module:anchors|ইংরেজি উইকিঅভিধান]] থেকে হালনাগাদ করা হল
507794
Scribunto
text/plain
local export = {}
local string_utilities_module = "Module:string utilities"
local anchor_encode = mw.uri.anchorEncode
local concat = table.concat
local insert = table.insert
local language_anchor -- Defined below.
-- Self-replacing loaders: each requires its target on first call, then
-- rebinds itself so later calls go straight to the target function.
local function decode_entities(...)
	local fn = require(string_utilities_module).decode_entities
	decode_entities = fn
	return fn(...)
end
local function encode_entities(...)
	local fn = require(string_utilities_module).encode_entities
	encode_entities = fn
	return fn(...)
end
-- Returns the anchor text to be used as the fragment of a link to a language section.
function export.language_anchor(lang, id)
	-- Anchor format is "<full language name>: <id>".
	local label = lang:getFullName() .. ": " .. id
	return anchor_encode(label)
end
language_anchor = export.language_anchor
-- Normalizes input text (removes formatting etc.), which can then be used as an anchor in an `id=` field.
function export.normalize_anchor(str)
	-- Encode for use as an anchor, then resolve any HTML entities the
	-- encoding step produced or left behind.
	local encoded = anchor_encode(str)
	return decode_entities(encoded)
end
function export.make_anchors(ids)
	-- Build one hidden <span> per id and return their concatenation.
	local anchors = {}
	for i, id in ipairs(ids) do
		local span = mw.html.create("span")
			:addClass("template-anchor")
			:attr("id", anchor_encode(id))
			:attr("data-id", id)
		anchors[i] = tostring(span)
	end
	return concat(anchors)
end
function export.senseid(lang, id, tag_name)
	-- Deliberately emits an unclosed opening tag, so mw.html cannot be used
	-- (it always closes tags). With <li> the parser closes the tag
	-- automatically, so it doesn't matter; with <p> the missing close is a
	-- known problem.
	return ("<%s class=\"senseid\" id=\"%s\" data-lang=\"%s\" data-id=\"%s\">"):format(
		tag_name, language_anchor(lang, id), lang:getCode(), encode_entities(id))
end
function export.etymid(lang, id)
	-- Use a <ul> tag to ensure spacing doesn't get messed up.
	local node = mw.html.create("ul")
	node:addClass("etymid")
	node:attr("id", language_anchor(lang, id))
	node:attr("data-lang", lang:getCode())
	node:attr("data-id", id)
	return tostring(node)
end
-- Returns an empty <ul class="etymonid"> carrying the language code, an
-- optional id, and optional flags from `opts` (no_tree, title, empty_tree)
-- as data attributes. A <ul> tag is used to ensure spacing doesn't get
-- messed up.
function export.etymonid(lang, id, opts)
	opts = opts or {}
	local list = mw.html.create("ul")
		:addClass("etymonid")
		:attr("data-lang", lang:getCode())
	if id then
		list:attr("id", language_anchor(lang, id))
		list:attr("data-id", id)
	end
	if opts.no_tree then
		list:attr("data-no-tree", "1")
	end
	if opts.title then
		list:attr("data-title", opts.title)
	end
	if opts.empty_tree then
		list:attr("data-empty-tree", "1")
	end
	return tostring(list)
end
return export
d0onsd99m5ccscopc1i38n0lhzar2z3
বিষয়শ্রেণী:ভুক্তিযুক্ত পাতা
14
156749
507808
325340
2026-04-14T09:21:51Z
Redmin
6857
+d
507808
wikitext
text/x-wiki
__HIDDENCAT__
{{delete|বর্তমানে অপ্রয়োজনীয়}}
[[বিষয়শ্রেণী:উইকিঅভিধান]]
7j769ixv71e6vqehhwzzxwl0obu2f4p
মডিউল:affix/lang-data/fr
828
167300
507800
2026-04-14T08:36:02Z
RedminBot
9553
[[en:Module:affix/lang-data/fr
]] থেকে আনা হলো
507800
Scribunto
text/plain
--[=[
This module contains lang-specific affix mappings for French.
]=]
-- Map the canonical suffix "-ation" onto its orthographic variants so that
-- affix templates recognize all of them.
local affix_variants = require("Module:affix").affix_variants
return {
	affix_mappings = affix_variants("-ation", {"-tion", "-xion", "-sion"}),
}
n1eo8ddggq4rq0tscuj1wfe8cjyf6jb
মডিউল:affix/lang-data/sah
828
167301
507801
2026-04-14T08:36:02Z
RedminBot
9553
[[en:Module:affix/lang-data/sah
]] থেকে আনা হলো
507801
Scribunto
text/plain
--[=[
This module contains lang-specific affix mappings for Yakut.
]=]
local m_affix = require("Module:affix")
-- Each affix_variants call maps a canonical suffix onto its vowel-harmony
-- variants; the two mapping tables are then merged into one.
return {
	affix_mappings = m_affix.merge_tables(
		m_affix.affix_variants("-ис", {"-үс", "-ыс", "-ус"}),
		m_affix.affix_variants("-ии", {"-үү", "-ыы", "-уу"})
	),
}
17lj4vz734ope2gky32khnml72udday
মডিউল:affix/lang-data/tk
828
167302
507802
2026-04-14T08:36:03Z
RedminBot
9553
[[en:Module:affix/lang-data/tk
]] থেকে আনা হলো
507802
Scribunto
text/plain
--[=[
This module contains lang-specific affix mappings for Turkmen.
]=]
-- Single static mapping: "-ly" is treated as a variant of "-li".
return {
	affix_mappings = {
		["-ly"] = "-li",
	},
}
d0ppprfyyvspz3qn1f6wsev7c9jfbh3
মডিউল:affix/lang-data/trk-pro
828
167303
507803
2026-04-14T08:36:03Z
RedminBot
9553
[[en:Module:affix/lang-data/trk-pro
]] থেকে আনা হলো
507803
Scribunto
text/plain
--[=[
This module contains lang-specific affix mappings for Proto-Turkic.
]=]
local m_affix = require("Module:affix")
-- Only one variant set at present; merge_tables is kept so the shape stays
-- consistent with the other lang-data modules.
return {
	affix_mappings = m_affix.merge_tables(
		m_affix.affix_variants("*-gü", {"*-gu"})
	),
}
49hyevop9smjhv7y005yivukiw7fjgx
মডিউল:affix doc
828
167304
507804
2026-04-14T08:36:05Z
RedminBot
9553
[[en:Module:affix doc
]] থেকে আনা হলো
507804
Scribunto
text/plain
--[=[
This module contains functions to display user-readable tables of affix-related information.
Author: Benwing2
]=]
local export = {}
local m_affix = require("Module:affix")
-- Wraps `text` in <code>...</code> tags for display in the wikitable.
local function make_code(text)
	return ("<code>%s</code>"):format(text)
end
-- Builds a wikitable describing every etymology type supported by
-- [[Module:affix]]: its code, what it aliases (if anything), its display
-- text and the category it adds.
-- `frame` is accepted for #invoke compatibility but is not used.
-- Fix: removed the local `last_type`, which was declared but never read.
function export.etymology_type_table(frame)
	local alldata = {}
	-- Convert the code -> description map into a list of row objects.
	for etytype, desc in pairs(m_affix.etymology_types) do
		local alias_of
		if type(desc) == "string" then
			-- A string value marks this code as an alias; resolve it to the
			-- real description table.
			alias_of = make_code(desc)
			desc = m_affix.etymology_types[desc]
		else
			alias_of = "—"
		end
		local cat
		if desc.borrowing_type then
			cat = ("%s from <var>source</var>"):format(desc.borrowing_type)
		else
			cat = desc.cat
		end
		table.insert(alldata, {
			type = etytype,
			text = desc.text,
			cat = cat,
			alias_of = alias_of,
		})
	end
	-- pairs() iteration order is unspecified; sort rows by code so the
	-- output is stable.
	table.sort(alldata, function(obj1, obj2)
		return obj1.type < obj2.type
	end)
	-- Convert to a wikitable.
	local parts = {}
	table.insert(parts, '{|class="wikitable"')
	table.insert(parts, "! Type !! Alias of !! Display <small>(click on link for explanation)</small> !! Category")
	for _, obj in ipairs(alldata) do
		table.insert(parts, "|-")
		local sparts = {
			make_code(obj.type),
			obj.alias_of,
			obj.text,
			make_code("<var>lang</var> " .. obj.cat),
		}
		table.insert(parts, "| " .. table.concat(sparts, " || "))
	end
	table.insert(parts, "|}")
	return table.concat(parts, "\n")
end
return export
a6e0ayid713h0q1e7b9huwpl0ojjucp