You could set up a series of user-defined functions to parse through the column. It would likely be slow and not robust at all.
As an example, though (with no real error checking, etc., and only minimally tested):
-- Sample table: each row holds an entire CSV blob in a single column
IF OBJECT_ID('dbo.Test_CSV_Search') IS NOT NULL
    DROP TABLE dbo.Test_CSV_Search
GO
CREATE TABLE dbo.Test_CSV_Search
(
    my_id INT IDENTITY NOT NULL,
    txt   VARCHAR(MAX) NOT NULL,
    CONSTRAINT PK_Test_CSV_Search PRIMARY KEY CLUSTERED (my_id)
)
GO
-- One row of test data: three CSV lines, six columns each
INSERT INTO dbo.Test_CSV_Search (txt) VALUES ('11, 12, 13, 14,15,16
21,22, 23,24, 25,26
31,22,33,34,35,36')
GO
-- Returns every CSV line in the given row whose Nth column equals the search value.
-- No real error checking: column counts are assumed consistent and values are
-- compared as trimmed strings.
IF OBJECT_ID('dbo.Get_CSV_Row') IS NOT NULL
    DROP FUNCTION dbo.Get_CSV_Row
GO
CREATE FUNCTION dbo.Get_CSV_Row
(@my_id INT, @col_num SMALLINT, @search_value VARCHAR(100))
RETURNS @results TABLE (row_num INT, row_txt VARCHAR(MAX))
AS
BEGIN
    DECLARE
        @csv_txt    VARCHAR(MAX),
        @start_pos  INT,
        @end_pos    INT,
        @cur_col    SMALLINT,
        @line_start INT,
        @line_end   INT,
        @row_num    INT

    -- Normalize line endings to LF and append a trailing LF so the last line is processed
    SELECT @csv_txt = REPLACE(txt, CHAR(13), '') + CHAR(10)
    FROM dbo.Test_CSV_Search
    WHERE my_id = @my_id

    SELECT
        @line_start = 1,
        @cur_col = 1,
        @start_pos = 1,
        @row_num = 1

    -- Walk the text one line at a time
    WHILE (CHARINDEX(CHAR(10), @csv_txt, @line_start) > 0)
    BEGIN
        SELECT
            @line_end = CHARINDEX(CHAR(10), @csv_txt, @line_start),
            @end_pos = CHARINDEX(',', @csv_txt, @start_pos)

        -- Skip ahead to the requested column
        WHILE (@cur_col < @col_num)
        BEGIN
            SET @start_pos = @end_pos + 1
            SET @end_pos = CHARINDEX(',', @csv_txt, @start_pos)
            SET @cur_col = @cur_col + 1
        END

        -- If the requested column is the last one on the line there is no trailing
        -- comma, so cap the end position at the line break
        IF (@end_pos = 0 OR @end_pos > @line_end)
            SET @end_pos = @line_end

        -- Compare the trimmed column value and return the whole line on a match
        IF (RTRIM(LTRIM(SUBSTRING(@csv_txt, @start_pos, @end_pos - @start_pos))) = @search_value)
        BEGIN
            INSERT INTO @results (row_num, row_txt)
            VALUES (@row_num, RTRIM(LTRIM(SUBSTRING(@csv_txt, @line_start, @line_end - @line_start))))
        END

        -- Move on to the next line
        SELECT
            @line_start = @line_end + 1,
            @start_pos = @line_end + 1,
            @cur_col = 1,
            @row_num = @row_num + 1
    END

    RETURN
END
GO
SELECT * FROM dbo.Get_CSV_Row(1, 1, '11')
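Against the sample data above, that call should return one row: row_num 1 with row_txt '11, 12, 13, 14,15,16', since only the first line has 11 in its first column. Searching on another column works the same way; as a rough illustration (same sample data, same function as sketched above), the following should return the second and third lines, because both have 22 in their second column:
-- Find every line whose second column is 22 (lines 2 and 3 of the sample data)
SELECT * FROM dbo.Get_CSV_Row(1, 2, '22')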