Job Interview Coding Questions
PDD
Longest Common Subsequence
1143 - Longest Common Subsequence
class Solution:
    def longestCommonSubsequence(self, text1: str, text2: str) -> int:
        """
        2D dynamic programming: dp[i][j] is the LCS length of text1[:i] and text2[:j].
        """
        m, n = len(text1), len(text2)
        # dp = [[0] * (n+1)] * (m+1) is wrong: all m+1 rows would alias the same list
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if text1[i - 1] == text2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
        return dp[-1][-1]
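Each dp row only reads the row above it, so the table can be compressed to a single row plus one saved diagonal value. A minimal sketch of that space optimization (the lcs_1d helper is added here, not part of the original solution):

def lcs_1d(text1: str, text2: str) -> int:
    # Hypothetical O(n)-space variant of the 2D DP above
    m, n = len(text1), len(text2)
    dp = [0] * (n + 1)    # one row of the 2D table
    for i in range(1, m + 1):
        prev_diag = 0     # holds dp[i-1][j-1] before it is overwritten
        for j in range(1, n + 1):
            tmp = dp[j]   # dp[i-1][j], the diagonal for the next column
            if text1[i - 1] == text2[j - 1]:
                dp[j] = prev_diag + 1
            else:
                dp[j] = max(dp[j], dp[j - 1])
            prev_diag = tmp
    return dp[n]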
Number of Islands
200 - Number of Islands
from typing import List

class Solution:
    def numIslands(self, grid: List[List[str]]) -> int:
        """
        On each '1', run a depth-first search (recursing over up/down/left/right,
        much like traversing a binary tree) and set visited cells to '0' so they
        are not searched again.
        """
        def dfs(grid, i, j):
            if not 0 <= i < len(grid) or not 0 <= j < len(grid[0]) or grid[i][j] == '0':
                return
            grid[i][j] = '0'
            dfs(grid, i - 1, j)
            dfs(grid, i, j - 1)
            dfs(grid, i, j + 1)
            dfs(grid, i + 1, j)

        cnt = 0
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                if grid[i][j] == '1':
                    dfs(grid, i, j)
                    cnt += 1
        return cnt
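A quick sanity check on a small grid (hypothetical driver code, added for illustration):

grid = [
    ["1", "1", "0", "0", "0"],
    ["1", "1", "0", "0", "0"],
    ["0", "0", "1", "0", "0"],
    ["0", "0", "0", "1", "1"],
]
print(Solution().numIslands(grid))  # expected: 3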
Sqrt(x)
69 - Sqrt(x)
class Solution:
    def mySqrt(self, x: int) -> int:
        """
        Binary search for the largest integer whose square does not exceed x.
        """
        left, right = 0, x
        while left <= right:
            mid = (left + right) // 2
            if mid * mid <= x:
                left = mid + 1
            else:
                right = mid - 1
        # The loop exits with left == right + 1, so right is the answer
        return right
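Newton's method is a common alternative here, iterating t <- (t + x // t) // 2 from above until t * t <= x. A minimal sketch (the my_sqrt_newton helper is added here, not part of the original notes):

def my_sqrt_newton(x: int) -> int:
    # Hypothetical alternative: integer Newton iteration on f(t) = t*t - x.
    # Starting from t = x >= sqrt(x), t decreases monotonically to floor(sqrt(x)).
    if x < 2:
        return x
    t = x
    while t * t > x:
        t = (t + x // t) // 2
    return t

print(my_sqrt_newton(8))  # expected: 2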
Zhipu
Temperature Trend
LCP 61. Temperature Trend
from typing import List

class Solution:
    def temperatureTrend(self, temperatureA: List[int], temperatureB: List[int]) -> int:

        def trans(t1, t2):
            """Classify the day-to-day trend: rising, flat, or falling."""
            if t2 > t1:
                return '+'
            elif t2 == t1:
                return '0'
            else:
                return '-'

        occ_cnt, max_occ = 0, 0
        for i in range(1, len(temperatureA)):
            if trans(temperatureA[i - 1], temperatureA[i]) == trans(temperatureB[i - 1], temperatureB[i]):
                occ_cnt += 1
            else:
                max_occ = max(max_occ, occ_cnt)
                occ_cnt = 0
        max_occ = max(max_occ, occ_cnt)
        return max_occ
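A quick check (hypothetical driver code; the inputs cover falling, flat, and rising trends):

a = [21, 18, 18, 18, 31]
b = [34, 32, 16, 16, 17]
print(Solution().temperatureTrend(a, b))  # expected: 2 (the last two trends match back-to-back)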
Build a Binary Tree from an Array
from typing import List, Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def array_to_btree(arr: List[Optional[int]], index: int = 0) -> Optional[TreeNode]:
    """
    Recursively convert a level-order array into a binary tree.
    :param arr: list of node values, where None marks an empty node.
    :param index: current index, starting from 0.
    :return: root node of the binary tree.
    """
    # Also stop on None entries so they really become empty nodes
    if index >= len(arr) or arr[index] is None:
        return None
    root = TreeNode(arr[index])
    root.left = array_to_btree(arr, 2 * index + 1)
    root.right = array_to_btree(arr, 2 * index + 2)
    return root

def preorder_traversal(root: Optional[TreeNode]):
    """Preorder traversal of a binary tree."""
    if root:
        print(root.val, end=" ")
        preorder_traversal(root.left)
        preorder_traversal(root.right)

# Example array: a binary tree in level order
arr = [1, 2, 3, 4, 5, 6, 7]
tree_root = array_to_btree(arr)
print("Preorder traversal:")
preorder_traversal(tree_root)  # 1 2 4 5 3 6 7
# Stack-based (iterative) version
def array_to_btree(arr: List[Optional[int]]) -> Optional[TreeNode]:
    """
    Convert a level-order array into a binary tree using an explicit stack.
    :param arr: list of node values, where None marks an empty node.
    :return: root node of the binary tree.
    """
    if not arr or arr[0] is None:
        return None
    root = TreeNode(arr[0])
    stack = [(root, 0)]
    while stack:
        node, index = stack.pop()
        left_index, right_index = 2 * index + 1, 2 * index + 2
        if left_index < len(arr) and arr[left_index] is not None:
            node.left = TreeNode(arr[left_index])
            stack.append((node.left, left_index))
        if right_index < len(arr) and arr[right_index] is not None:
            node.right = TreeNode(arr[right_index])
            stack.append((node.right, right_index))
    return root
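Both builders produce the same tree, and the stack-based one skips None entries explicitly. A quick check (hypothetical driver code):

print()
preorder_traversal(array_to_btree([1, 2, None, 4]))  # 1 2 4 (index 2 is an empty node)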
Shopee
Multi-Head Self-Attention
import torch
import torch.nn as nn

class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super(MultiHeadSelfAttention, self).__init__()
        assert embed_dim % num_heads == 0, "Embedding dimension must be divisible by number of heads"
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.scale = self.head_dim ** 0.5

        # Linear layers for query, key, and value
        self.qkv = nn.Linear(embed_dim, embed_dim * 3)
        self.out = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        batch_size, seq_len, embed_dim = x.size()

        # Generate Q, K, V from input
        qkv = self.qkv(x)  # (batch_size, seq_len, embed_dim * 3)
        qkv = qkv.view(batch_size, seq_len, 3, self.num_heads, self.head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4)  # (3, batch_size, num_heads, seq_len, head_dim)

        # Scaled dot-product attention
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.scale  # (batch_size, num_heads, seq_len, seq_len)
        attn = torch.softmax(scores, dim=-1)
        attn = self.dropout(attn)

        out = torch.matmul(attn, v)  # (batch_size, num_heads, seq_len, head_dim)
        out = out.permute(0, 2, 1, 3).contiguous()  # (batch_size, seq_len, num_heads, head_dim)
        out = out.view(batch_size, seq_len, -1)  # (batch_size, seq_len, embed_dim)

        # Final linear projection
        out = self.out(out)
        return out
# Example
batch_size = 2
seq_len = 4
embed_dim = 32
num_heads = 4

x = torch.randn(batch_size, seq_len, embed_dim)
attention = MultiHeadSelfAttention(embed_dim, num_heads)
output = attention(x)
print("Input shape:", x.shape)
print("Output shape:", output.shape)